Example #1
            public override Query Rewrite(IndexReader reader, MultiTermQuery query)
            {
                FilteredTermEnum enumerator = query.GetEnum(reader);
                BooleanQuery     result     = new BooleanQuery(true);
                int count = 0;

                try
                {
                    do
                    {
                        Term t = enumerator.Term();
                        if (t != null)
                        {
                            TermQuery tq = new TermQuery(t);                             // found a match
                            tq.SetBoost(query.GetBoost() * enumerator.Difference());     // set the boost
                            result.Add(tq, BooleanClause.Occur.SHOULD);                  // add to query
                            count++;
                        }
                    }while (enumerator.Next());
                }
                finally
                {
                    enumerator.Close();
                }
                query.IncTotalNumberOfTerms(count);
                return(result);
            }
Example #2
            public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
            {
                if (Enclosing_Instance.terms.Count == 0)
                {
                    // optimize zero-term case
                    return(null);
                }

                TermPositions[] tps = new TermPositions[Enclosing_Instance.terms.Count];
                for (int i = 0; i < Enclosing_Instance.terms.Count; i++)
                {
                    TermPositions p = reader.TermPositions((Term)Enclosing_Instance.terms[i]);
                    if (p == null)
                    {
                        return(null);
                    }
                    tps[i] = p;
                }

                if (Enclosing_Instance.slop == 0)
                {
                    // optimize exact case
                    return(new ExactPhraseScorer(this, tps, Enclosing_Instance.GetPositions(), similarity, reader.Norms(Enclosing_Instance.field)));
                }
                else
                {
                    return(new SloppyPhraseScorer(this, tps, Enclosing_Instance.GetPositions(), similarity, Enclosing_Instance.slop, reader.Norms(Enclosing_Instance.field)));
                }
            }
Example #3
        // inherit javadocs
        public virtual ScoreDocComparator NewComparator(IndexReader reader, System.String fieldname)
        {
            System.String        field        = String.Intern(fieldname);
            System.IComparable[] cachedValues = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetCustom(reader, field, this);

            return(new AnonymousClassScoreDocComparator(cachedValues, this));
        }
Example #4
 /// <summary> Returns a comparator for sorting hits according to a field containing strings.</summary>
 /// <param name="reader"> Index to use.
 /// </param>
 /// <param name="fieldname"> Fieldable containing string values.
 /// </param>
 /// <returns>  Comparator for sorting hits.
 /// </returns>
 /// <throws>  IOException If an error occurs reading the index. </throws>
 internal static ScoreDocComparator comparatorStringLocale(IndexReader reader, System.String fieldname, System.Globalization.CultureInfo locale)
 {
     System.Globalization.CompareInfo collator = locale.CompareInfo;
     System.String   field = String.Intern(fieldname);
     System.String[] index = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetStrings(reader, field);
     return(new AnonymousClassScoreDocComparator7(index, collator));
 }
Example #5
 /// <summary> Returns a comparator for sorting hits according to values in the given field.
 /// The terms in the field are looked at to determine whether they contain integers,
 /// floats or strings.  Once the type is determined, one of the other static methods
 /// in this class is called to get the comparator.
 /// </summary>
 /// <param name="reader"> Index to use.
 /// </param>
 /// <param name="fieldname"> Fieldable containing values.
 /// </param>
 /// <returns>  Comparator for sorting hits.
 /// </returns>
 /// <throws>  IOException If an error occurs reading the index. </throws>
 internal static ScoreDocComparator ComparatorAuto(IndexReader reader, System.String fieldname)
 {
     System.String field       = String.Intern(fieldname);
     System.Object lookupArray = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetAuto(reader, field);
     if (lookupArray is Mono.Lucene.Net.Search.StringIndex)
     {
         return(comparatorString(reader, field));
     }
     else if (lookupArray is int[])
     {
         return(comparatorInt(reader, field, null));
     }
     else if (lookupArray is long[])
     {
         return(comparatorLong(reader, field, null));
     }
     else if (lookupArray is float[])
     {
         return(comparatorFloat(reader, field, null));
     }
     else if (lookupArray is System.String[])
     {
         return(comparatorString(reader, field));
     }
     else
     {
         throw new System.SystemException("unknown data type in field '" + field + "'");
     }
 }
Example #6
		/// <summary> Enumerates all terms greater than or equal to <code>lowerTerm</code>
		/// but less than or equal to <code>upperTerm</code>.
		/// 
		/// If an endpoint is null, it is said to be "open". Either or both 
		/// endpoints may be open.  Open endpoints may not be exclusive 
		/// (you can't select all but the first or last term without 
		/// explicitly specifying the term to exclude.)
		/// 
		/// </summary>
		/// <param name="reader">
		/// </param>
		/// <param name="field">An interned field that holds both lower and upper terms.
		/// </param>
		/// <param name="lowerTermText">The term text at the lower end of the range
		/// </param>
		/// <param name="upperTermText">The term text at the upper end of the range
		/// </param>
		/// <param name="includeLower">If true, the <code>lowerTerm</code> is included in the range.
		/// </param>
		/// <param name="includeUpper">If true, the <code>upperTerm</code> is included in the range.
		/// </param>
		/// <param name="collator">The collator to use to collate index Terms, to determine their
		/// membership in the range bounded by <code>lowerTerm</code> and
		/// <code>upperTerm</code>.
		/// 
		/// </param>
		/// <throws>  IOException </throws>
		public TermRangeTermEnum(IndexReader reader, System.String field, System.String lowerTermText, System.String upperTermText, bool includeLower, bool includeUpper, System.Globalization.CompareInfo collator)
		{
			this.collator = collator;
			this.upperTermText = upperTermText;
			this.lowerTermText = lowerTermText;
			this.includeLower = includeLower;
			this.includeUpper = includeUpper;
			this.field = StringHelper.Intern(field);
			
			// do a little bit of normalization...
			// open ended range queries should always be inclusive.
			if (this.lowerTermText == null)
			{
				this.lowerTermText = "";
				this.includeLower = true;
			}
			
			if (this.upperTermText == null)
			{
				this.includeUpper = true;
			}
			
			// When a collator is supplied, collation order need not match the index's
			// term order, so start the enumeration at the beginning of the field and
			// let the collator decide range membership term by term.
			System.String startTermText = collator == null ? this.lowerTermText : "";
			SetEnum(reader.Terms(new Term(this.field, startTermText)));
		}
Example #7
        /// <summary> Creates a hit queue sorted by the given list of fields.</summary>
        /// <param name="reader"> Index to use.
        /// </param>
        /// <param name="fields">Fieldable names, in priority order (highest priority first).  Cannot be <code>null</code> or empty.
        /// </param>
        /// <param name="size"> The number of hits to retain.  Must be greater than zero.
        /// </param>
        /// <throws>  IOException </throws>
        public FieldSortedHitQueue(IndexReader reader, SortField[] fields, int size)
        {
            int n = fields.Length;

            comparators = new ScoreDocComparator[n];
            this.fields = new SortField[n];
            for (int i = 0; i < n; ++i)
            {
                System.String fieldname = fields[i].GetField();
                comparators[i] = GetCachedComparator(reader, fieldname, fields[i].GetType(), fields[i].GetParser(), fields[i].GetLocale(), fields[i].GetFactory());
                // new SortField instances must only be created when auto-detection is in use
                if (fields[i].GetType() == SortField.AUTO)
                {
                    if (comparators[i].SortType() == SortField.STRING)
                    {
                        this.fields[i] = new SortField(fieldname, fields[i].GetLocale(), fields[i].GetReverse());
                    }
                    else
                    {
                        this.fields[i] = new SortField(fieldname, comparators[i].SortType(), fields[i].GetReverse());
                    }
                }
                else
                {
                    System.Diagnostics.Debug.Assert(comparators [i].SortType() == fields [i].GetType());
                    this.fields[i] = fields[i];
                }
            }
            Initialize(size);
        }
Example #8
            protected internal override System.Object CreateValue(IndexReader reader, Entry entryKey)
            {
                System.String   field    = StringHelper.Intern((System.String)entryKey.field);
                System.String[] retArray = new System.String[reader.MaxDoc()];
                TermDocs        termDocs = reader.TermDocs();
                TermEnum        termEnum = reader.Terms(new Term(field));

                try
                {
                    do
                    {
                        Term term = termEnum.Term();
                        if (term == null || (System.Object)term.Field() != (System.Object)field)
                        {
                            break;
                        }
                        System.String termval = term.Text();
                        termDocs.Seek(termEnum);
                        while (termDocs.Next())
                        {
                            retArray[termDocs.Doc()] = termval;
                        }
                    }while (termEnum.Next());
                }
                finally
                {
                    termDocs.Close();
                    termEnum.Close();
                }
                return(retArray);
            }
Example #9
 public void Purge(IndexReader r)
 {
     foreach (Cache c in caches.Values)
     {
         c.Purge(r);
     }
 }
Example #10
        /// <summary> Constructor for enumeration of all terms from specified <code>reader</code> which share a prefix of
        /// length <code>prefixLength</code> with <code>term</code> and which have a fuzzy similarity &gt;
        /// <code>minSimilarity</code>.
        /// <p/>
        /// After calling the constructor the enumeration is already pointing to the first
        /// valid term if such a term exists.
        ///
        /// </summary>
        /// <param name="reader">Delivers terms.
        /// </param>
        /// <param name="term">Pattern term.
        /// </param>
        /// <param name="minSimilarity">Minimum required similarity for terms from the reader. Default value is 0.5f.
        /// </param>
        /// <param name="prefixLength">Length of required common prefix. Default value is 0.
        /// </param>
        /// <throws>  IOException </throws>
        public FuzzyTermEnum(IndexReader reader, Term term, float minSimilarity, int prefixLength) : base()
        {
            if (minSimilarity >= 1.0f)
            {
                throw new System.ArgumentException("minimumSimilarity cannot be greater than or equal to 1");
            }
            else if (minSimilarity < 0.0f)
            {
                throw new System.ArgumentException("minimumSimilarity cannot be less than 0");
            }
            if (prefixLength < 0)
            {
                throw new System.ArgumentException("prefixLength cannot be less than 0");
            }

            this.minimumSimilarity = minSimilarity;
            this.scale_factor      = 1.0f / (1.0f - minimumSimilarity);
            this.searchTerm        = term;
            this.field             = searchTerm.Field();

            //The prefix could be longer than the word.
            //It's kind of silly though.  It means we must match the entire word.
            int fullSearchTermLength = searchTerm.Text().Length;
            int realPrefixLength     = prefixLength > fullSearchTermLength ? fullSearchTermLength : prefixLength;

            this.text   = searchTerm.Text().Substring(realPrefixLength);
            this.prefix = searchTerm.Text().Substring(0, realPrefixLength);

            InitializeMaxDistances();
            this.d = InitDistanceArray();

            SetEnum(reader.Terms(new Term(searchTerm.Field(), prefix)));
        }
Example #11
            /* Explain the score we computed for doc */
            public override Explanation Explain(IndexReader reader, int doc)
            {
                if (Enclosing_Instance.disjuncts.Count == 1)
                {
                    return(((Weight)weights[0]).Explain(reader, doc));
                }
                ComplexExplanation result = new ComplexExplanation();
                float max = 0.0f, sum = 0.0f;

                result.SetDescription(Enclosing_Instance.tieBreakerMultiplier == 0.0f ? "max of:" : "max plus " + Enclosing_Instance.tieBreakerMultiplier + " times others of:");
                for (System.Collections.IEnumerator iter = weights.GetEnumerator(); iter.MoveNext();)
                {
                    Explanation e = ((Weight)iter.Current).Explain(reader, doc);
                    if (e.IsMatch())
                    {
                        System.Boolean tempAux = true;
                        result.SetMatch(tempAux);
                        result.AddDetail(e);
                        sum += e.GetValue();
                        max  = System.Math.Max(max, e.GetValue());
                    }
                }
                result.SetValue(max + (sum - max) * Enclosing_Instance.tieBreakerMultiplier);
                return(result);
            }
Example #12
        public override Query Rewrite(IndexReader reader)
        {
            SpanOrQuery clone = null;

            for (int i = 0; i < clauses.Count; i++)
            {
                SpanQuery c     = (SpanQuery)clauses[i];
                SpanQuery query = (SpanQuery)c.Rewrite(reader);
                if (query != c)
                {
                    // clause rewrote: must clone
                    if (clone == null)
                    {
                        clone = (SpanOrQuery)this.Clone();
                    }
                    clone.clauses[i] = query;
                }
            }
            if (clone != null)
            {
                return(clone);                // some clauses rewrote
            }
            else
            {
                return(this);                // no clauses rewrote
            }
        }
Example #13
        public override Query Rewrite(IndexReader reader)
        {
            SpanNotQuery clone = null;

            SpanQuery rewrittenInclude = (SpanQuery)include.Rewrite(reader);

            if (rewrittenInclude != include)
            {
                clone         = (SpanNotQuery)this.Clone();
                clone.include = rewrittenInclude;
            }
            SpanQuery rewrittenExclude = (SpanQuery)exclude.Rewrite(reader);

            if (rewrittenExclude != exclude)
            {
                if (clone == null)
                {
                    clone = (SpanNotQuery)this.Clone();
                }
                clone.exclude = rewrittenExclude;
            }

            if (clone != null)
            {
                return(clone);                // some clauses rewrote
            }
            else
            {
                return(this);                // no clauses rewrote
            }
        }
Example #14
        /*(non-Javadoc) @see Mono.Lucene.Net.Search.Query#rewrite(Mono.Lucene.Net.Index.IndexReader) */
        public override Query Rewrite(IndexReader reader)
        {
            CustomScoreQuery clone = null;

            Query sq = subQuery.Rewrite(reader);

            if (sq != subQuery)
            {
                clone          = (CustomScoreQuery)Clone();
                clone.subQuery = sq;
            }

            for (int i = 0; i < valSrcQueries.Length; i++)
            {
                ValueSourceQuery v = (ValueSourceQuery)valSrcQueries[i].Rewrite(reader);
                if (v != valSrcQueries[i])
                {
                    if (clone == null)
                    {
                        clone = (CustomScoreQuery)Clone();
                    }
                    clone.valSrcQueries[i] = v;
                }
            }

            return((clone == null) ? this : clone);
        }
Example #15
            private Explanation DoExplain(IndexReader reader, int doc)
            {
                Scorer[] valSrcScorers = new Scorer[valSrcWeights.Length];
                for (int i = 0; i < valSrcScorers.Length; i++)
                {
                    valSrcScorers[i] = valSrcWeights[i].Scorer(reader, true, false);
                }
                Explanation subQueryExpl = subQueryWeight.Explain(reader, doc);

                if (!subQueryExpl.IsMatch())
                {
                    return(subQueryExpl);
                }
                // match
                Explanation[] valSrcExpls = new Explanation[valSrcScorers.Length];
                for (int i = 0; i < valSrcScorers.Length; i++)
                {
                    valSrcExpls[i] = valSrcScorers[i].Explain(doc);
                }
                Explanation customExp = Enclosing_Instance.GetCustomScoreProvider(reader).CustomExplain(doc, subQueryExpl, valSrcExpls);
                float       sc        = GetValue() * customExp.GetValue();
                Explanation res       = new ComplexExplanation(true, sc, Enclosing_Instance.ToString() + ", product of:");

                res.AddDetail(customExp);
                res.AddDetail(new Explanation(GetValue(), "queryBoost"));                 // actually using the q boost as q weight (== weight value)
                return(res);
            }
Example #16
            public override Query Rewrite(IndexReader reader, MultiTermQuery query)
            {
                Query result = new ConstantScoreQuery(new MultiTermQueryWrapperFilter(query));

                result.SetBoost(query.GetBoost());
                return(result);
            }
Example #17
        public override System.Collections.BitArray Bits(IndexReader reader)
        {
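            // Allocate the BitArray with MaxDoc() rounded up to the next multiple of 64 bits.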
            System.Collections.BitArray bits = new System.Collections.BitArray((reader.MaxDoc() % 64 == 0 ? reader.MaxDoc() / 64 : reader.MaxDoc() / 64 + 1) * 64);

            new IndexSearcher(reader).Search(query, new AnonymousClassCollector(bits, this));
            return(bits);
        }
Example #18
            public override DocIdSet GetDocIdSet(IndexReader reader)
            {
                Mono.Lucene.Net.Search.StringIndex fcsi = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetStringIndex(reader, field);
                int lowerPoint = fcsi.BinarySearchLookup((System.String)lowerVal);
                int upperPoint = fcsi.BinarySearchLookup((System.String)upperVal);

                int inclusiveLowerPoint;
                int inclusiveUpperPoint;

                // Hints:
                // * binarySearchLookup returns 0, if value was null.
                // * the value is <0 if no exact hit was found, the returned value
                //   is (-(insertion point) - 1)
                if (lowerPoint == 0)
                {
                    System.Diagnostics.Debug.Assert(lowerVal == null);
                    inclusiveLowerPoint = 1;
                }
                else if (includeLower && lowerPoint > 0)
                {
                    inclusiveLowerPoint = lowerPoint;
                }
                else if (lowerPoint > 0)
                {
                    inclusiveLowerPoint = lowerPoint + 1;
                }
                else
                {
                    inclusiveLowerPoint = System.Math.Max(1, -lowerPoint - 1);
                }

                if (upperPoint == 0)
                {
                    System.Diagnostics.Debug.Assert(upperVal == null);
                    inclusiveUpperPoint = System.Int32.MaxValue;
                }
                else if (includeUpper && upperPoint > 0)
                {
                    inclusiveUpperPoint = upperPoint;
                }
                else if (upperPoint > 0)
                {
                    inclusiveUpperPoint = upperPoint - 1;
                }
                else
                {
                    inclusiveUpperPoint = -upperPoint - 2;
                }

                if (inclusiveUpperPoint <= 0 || inclusiveLowerPoint > inclusiveUpperPoint)
                {
                    return(DocIdSet.EMPTY_DOCIDSET);
                }

                System.Diagnostics.Debug.Assert(inclusiveLowerPoint > 0 && inclusiveUpperPoint > 0);

                // for this DocIdSet, we never need to use TermDocs,
                // because deleted docs have an order of 0 (null entry in StringIndex)
                return(new AnonymousClassFieldCacheDocIdSet(fcsi, inclusiveLowerPoint, inclusiveUpperPoint, this, reader, false));
            }
Example #19
			internal MatchAllScorer(MatchAllDocsQuery enclosingInstance, IndexReader reader, Similarity similarity, Weight w, byte[] norms):base(similarity)
			{
				InitBlock(enclosingInstance);
				this.termDocs = reader.TermDocs(null);
				score = w.GetValue();
				this.norms = norms;
			}
Example #20
 public void Purge(IndexReader r)
 {
     foreach (Cache c in caches.Values)
     {
         c.Purge(r);
     }
 }
Example #21
        /// <summary> Enumerates all terms greater than or equal to <code>lowerTerm</code>
        /// but less than or equal to <code>upperTerm</code>.
        ///
        /// If an endpoint is null, it is said to be "open". Either or both
        /// endpoints may be open.  Open endpoints may not be exclusive
        /// (you can't select all but the first or last term without
        /// explicitly specifying the term to exclude.)
        ///
        /// </summary>
        /// <param name="reader">
        /// </param>
        /// <param name="field">An interned field that holds both lower and upper terms.
        /// </param>
        /// <param name="lowerTermText">The term text at the lower end of the range
        /// </param>
        /// <param name="upperTermText">The term text at the upper end of the range
        /// </param>
        /// <param name="includeLower">If true, the <code>lowerTerm</code> is included in the range.
        /// </param>
        /// <param name="includeUpper">If true, the <code>upperTerm</code> is included in the range.
        /// </param>
        /// <param name="collator">The collator to use to collate index Terms, to determine their
        /// membership in the range bounded by <code>lowerTerm</code> and
        /// <code>upperTerm</code>.
        ///
        /// </param>
        /// <throws>  IOException </throws>
        public TermRangeTermEnum(IndexReader reader, System.String field, System.String lowerTermText, System.String upperTermText, bool includeLower, bool includeUpper, System.Globalization.CompareInfo collator)
        {
            this.collator      = collator;
            this.upperTermText = upperTermText;
            this.lowerTermText = lowerTermText;
            this.includeLower  = includeLower;
            this.includeUpper  = includeUpper;
            this.field         = StringHelper.Intern(field);

            // do a little bit of normalization...
            // open ended range queries should always be inclusive.
            if (this.lowerTermText == null)
            {
                this.lowerTermText = "";
                this.includeLower  = true;
            }

            if (this.upperTermText == null)
            {
                this.includeUpper = true;
            }

            // When a collator is supplied, collation order need not match the index's
            // term order, so start the enumeration at the beginning of the field and
            // let the collator decide range membership term by term.
            System.String startTermText = collator == null ? this.lowerTermText : "";
            SetEnum(reader.Terms(new Term(this.field, startTermText)));
        }
Example #22
 /// <summary> Returns sub-reader subIndex from reader.
 ///
 /// </summary>
 /// <param name="reader">parent reader
 /// </param>
 /// <param name="subIndex">index of desired sub reader
 /// </param>
 /// <returns> the sub-reader at subIndex
 /// </returns>
 public static IndexReader SubReader(IndexReader reader, int subIndex)
 {
     System.Collections.ArrayList subReadersList = new System.Collections.ArrayList();
     ReaderUtil.GatherSubReaders(subReadersList, reader);
     IndexReader[] subReaders = (IndexReader[])subReadersList.ToArray(typeof(IndexReader));
     return(subReaders[subIndex]);
 }
Example #23
 public override void  SetNextReader(IndexReader reader, int docBase)
 {
     // TODO: can we "map" our docIDs to the current
     // reader? saves having to then subtract on every
     // compare call
     this.docBase = docBase;
 }
Example #24
            public override Query Rewrite(IndexReader reader, MultiTermQuery query)
            {
                // Get the enum and start visiting terms.  If we
                // exhaust the enum before hitting either of the
                // cutoffs, we use ConstantBooleanQueryRewrite; else,
                // ConstantFilterRewrite:
                System.Collections.ArrayList pendingTerms = new System.Collections.ArrayList();
                int docCountCutoff = (int)((docCountPercent / 100.0) * reader.MaxDoc());
                int termCountLimit = System.Math.Min(BooleanQuery.GetMaxClauseCount(), termCountCutoff);
                int docVisitCount  = 0;

                FilteredTermEnum enumerator = query.GetEnum(reader);

                try
                {
                    while (true)
                    {
                        Term t = enumerator.Term();
                        if (t != null)
                        {
                            pendingTerms.Add(t);
                            // Loading the TermInfo from the terms dict here
                            // should not be costly, because 1) the
                            // query/filter will load the TermInfo when it
                            // runs, and 2) the terms dict has a cache:
                            docVisitCount += reader.DocFreq(t);
                        }

                        if (pendingTerms.Count >= termCountLimit || docVisitCount >= docCountCutoff)
                        {
                            // Too many terms -- make a filter.
                            Query result = new ConstantScoreQuery(new MultiTermQueryWrapperFilter(query));
                            result.SetBoost(query.GetBoost());
                            return(result);
                        }
                        else if (!enumerator.Next())
                        {
                            // Enumeration is done, and we hit a small
                            // enough number of terms & docs -- just make a
                            // BooleanQuery, now
                            System.Collections.IEnumerator it = pendingTerms.GetEnumerator();
                            BooleanQuery bq = new BooleanQuery(true);
                            while (it.MoveNext())
                            {
                                TermQuery tq = new TermQuery((Term)it.Current);
                                bq.Add(tq, BooleanClause.Occur.SHOULD);
                            }
                            // Strip scores
                            Query result = new ConstantScoreQuery(new QueryWrapperFilter(bq));
                            result.SetBoost(query.GetBoost());
                            query.IncTotalNumberOfTerms(pendingTerms.Count);
                            return(result);
                        }
                    }
                }
                finally
                {
                    enumerator.Close();
                }
            }
Example #25
		public override Query Rewrite(IndexReader reader)
		{
			if (!termContainsWildcard)
				return new TermQuery(GetTerm());
			else
				return base.Rewrite(reader);
		}
Example #26
            public override Explanation Explain(IndexReader reader, int doc)
            {
                ConstantScorer cs     = new ConstantScorer(enclosingInstance, similarity, reader, this);
                bool           exists = cs.docIdSetIterator.Advance(doc) == doc;

                ComplexExplanation result = new ComplexExplanation();

                if (exists)
                {
                    result.SetDescription("ConstantScoreQuery(" + Enclosing_Instance.filter + "), product of:");
                    result.SetValue(queryWeight);
                    System.Boolean tempAux = true;
                    result.SetMatch(tempAux);
                    result.AddDetail(new Explanation(Enclosing_Instance.GetBoost(), "boost"));
                    result.AddDetail(new Explanation(queryNorm, "queryNorm"));
                }
                else
                {
                    result.SetDescription("ConstantScoreQuery(" + Enclosing_Instance.filter + ") doesn't match id " + doc);
                    result.SetValue(0);
                    System.Boolean tempAux2 = false;
                    result.SetMatch(tempAux2);
                }
                return(result);
            }
Example #27
 internal MatchAllScorer(MatchAllDocsQuery enclosingInstance, IndexReader reader, Similarity similarity, Weight w, byte[] norms) : base(similarity)
 {
     InitBlock(enclosingInstance);
     this.termDocs = reader.TermDocs(null);
     score         = w.GetValue();
     this.norms    = norms;
 }
Example #28
        public override DocIdSet GetDocIdSet(IndexReader reader)
        {
            object coreKey    = reader.GetFieldCacheKey();
            object delCoreKey = reader.HasDeletions() ? reader.GetDeletesCacheKey() : coreKey;

            object cached = cache.Get(reader, coreKey, delCoreKey);

            if (cached != null)
            {
                hitCount++;
                if (cached is DocIdSet)
                {
                    return((DocIdSet)cached);
                }
                else
                {
                    return(new DocIdBitSet((System.Collections.BitArray)cached));
                }
            }
            missCount++;
            // cache miss
            DocIdSet docIdSet = DocIdSetToCache(filter.GetDocIdSet(reader), reader);

            if (docIdSet != null)
            {
                cache.Put(coreKey, delCoreKey, docIdSet);
            }

            return(docIdSet);
        }
Example #29
        public override System.Collections.BitArray Bits(IndexReader reader)
        {
            object coreKey    = reader.GetFieldCacheKey();
            object delCoreKey = reader.HasDeletions() ? reader.GetDeletesCacheKey() : coreKey;

            object cached = cache.Get(reader, coreKey, delCoreKey);

            if (cached != null)
            {
                if (cached is System.Collections.BitArray)
                {
                    return((System.Collections.BitArray)cached);
                }
                else if (cached is DocIdBitSet)
                {
                    return(((DocIdBitSet)cached).GetBitSet());
                }
                // It would be nice to handle the DocIdSet case, but that's not really possible
            }

            System.Collections.BitArray bits = filter.Bits(reader);

            if (bits != null)
            {
                cache.Put(coreKey, delCoreKey, bits);
            }

            return(bits);
        }
Example #30
            public override Explanation Explain(IndexReader ir, int i)
            {
                Explanation inner = weight.Explain(ir, i);

                if (Enclosing_Instance.GetBoost() != 1)
                {
                    Explanation preBoost = inner;
                    inner = new Explanation(inner.GetValue() * Enclosing_Instance.GetBoost(), "product of:");
                    inner.AddDetail(new Explanation(Enclosing_Instance.GetBoost(), "boost"));
                    inner.AddDetail(preBoost);
                }
                Filter           f                = Enclosing_Instance.filter;
                DocIdSet         docIdSet         = f.GetDocIdSet(ir);
                DocIdSetIterator docIdSetIterator = docIdSet == null ? DocIdSet.EMPTY_DOCIDSET.Iterator() : docIdSet.Iterator();

                if (docIdSetIterator == null)
                {
                    docIdSetIterator = DocIdSet.EMPTY_DOCIDSET.Iterator();
                }
                if (docIdSetIterator.Advance(i) == i)
                {
                    return(inner);
                }
                else
                {
                    Explanation result = new Explanation(0.0f, "failure to match filter: " + f.ToString());
                    result.AddDetail(inner);
                    return(result);
                }
            }
Example #31
            protected internal override System.Object CreateValue(IndexReader reader, Entry entryKey)
            {
                Entry entry = (Entry)entryKey;

                System.String  field      = entry.field;
                SortComparator comparator = (SortComparator)entry.custom;

                System.IComparable[] retArray = new System.IComparable[reader.MaxDoc()];
                TermDocs             termDocs = reader.TermDocs();
                TermEnum             termEnum = reader.Terms(new Term(field));

                try
                {
                    do
                    {
                        Term term = termEnum.Term();
                        if (term == null || (System.Object)term.Field() != (System.Object)field)
                        {
                            break;
                        }
                        System.IComparable termval = comparator.GetComparable(term.Text());
                        termDocs.Seek(termEnum);
                        while (termDocs.Next())
                        {
                            retArray[termDocs.Doc()] = termval;
                        }
                    }while (termEnum.Next());
                }
                finally
                {
                    termDocs.Close();
                    termEnum.Close();
                }
                return(retArray);
            }
Example #32
            protected internal override System.Object CreateValue(IndexReader reader, Entry entryKey)
            {
                Entry entry = (Entry)entryKey;

                System.String field  = entry.field;
                FloatParser   parser = (FloatParser)entry.custom;

                if (parser == null)
                {
                    try
                    {
                        return(wrapper.GetFloats(reader, field, Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT_FLOAT_PARSER));
                    }
                    catch (System.FormatException ne)
                    {
                        return(wrapper.GetFloats(reader, field, Mono.Lucene.Net.Search.FieldCache_Fields.NUMERIC_UTILS_FLOAT_PARSER));
                    }
                }
                float[]  retArray = null;
                TermDocs termDocs = reader.TermDocs();
                TermEnum termEnum = reader.Terms(new Term(field));

                try
                {
                    do
                    {
                        Term term = termEnum.Term();
                        if (term == null || (System.Object)term.Field() != (System.Object)field)
                        {
                            break;
                        }
                        float termval = parser.ParseFloat(term.Text());
                        if (retArray == null)
                        {
                            // late init
                            retArray = new float[reader.MaxDoc()];
                        }
                        termDocs.Seek(termEnum);
                        while (termDocs.Next())
                        {
                            retArray[termDocs.Doc()] = termval;
                        }
                    }while (termEnum.Next());
                }
                catch (StopFillCacheException stop)
                {
                }
                finally
                {
                    termDocs.Close();
                    termEnum.Close();
                }
                if (retArray == null)
                {
                    // no values
                    retArray = new float[reader.MaxDoc()];
                }
                return(retArray);
            }
Example #33
 public override void  SetNextReader(IndexReader reader, int docBase)
 {
     this.docBase = docBase;
     for (int i = 0; i < comparators.Length; i++)
     {
         comparators[i].SetNextReader(reader, docBase);
     }
 }
Example #34
            public override Query Rewrite(IndexReader reader, MultiTermQuery query)
            {
                // strip the scores off
                Query result = new ConstantScoreQuery(new QueryWrapperFilter(base.Rewrite(reader, query)));

                result.SetBoost(query.GetBoost());
                return(result);
            }
Example #35
		public override Explanation Explain(IndexReader reader, int doc)
		{
			
			ComplexExplanation result = new ComplexExplanation();
			result.SetDescription("weight(" + GetQuery() + " in " + doc + "), product of:");
			System.String field = ((SpanQuery) GetQuery()).GetField();
			
			Explanation idfExpl = new Explanation(idf, "idf(" + field + ": " + idfExp.Explain() + ")");
			
			// explain query weight
			Explanation queryExpl = new Explanation();
			queryExpl.SetDescription("queryWeight(" + GetQuery() + "), product of:");
			
			Explanation boostExpl = new Explanation(GetQuery().GetBoost(), "boost");
			if (GetQuery().GetBoost() != 1.0f)
				queryExpl.AddDetail(boostExpl);
			queryExpl.AddDetail(idfExpl);
			
			Explanation queryNormExpl = new Explanation(queryNorm, "queryNorm");
			queryExpl.AddDetail(queryNormExpl);
			
			queryExpl.SetValue(boostExpl.GetValue() * idfExpl.GetValue() * queryNormExpl.GetValue());
			
			result.AddDetail(queryExpl);
			
			// explain field weight
			ComplexExplanation fieldExpl = new ComplexExplanation();
			fieldExpl.SetDescription("fieldWeight(" + field + ":" + query.ToString(field) + " in " + doc + "), product of:");
			
			Explanation tfExpl = Scorer(reader, true, false).Explain(doc);
			fieldExpl.AddDetail(tfExpl);
			fieldExpl.AddDetail(idfExpl);
			
			Explanation fieldNormExpl = new Explanation();
			byte[] fieldNorms = reader.Norms(field);
			float fieldNorm = fieldNorms != null ? Similarity.DecodeNorm(fieldNorms[doc]) : 1.0f;
			fieldNormExpl.SetValue(fieldNorm);
			fieldNormExpl.SetDescription("fieldNorm(field=" + field + ", doc=" + doc + ")");
			fieldExpl.AddDetail(fieldNormExpl);
			
			fieldExpl.SetMatch(tfExpl.IsMatch());
			fieldExpl.SetValue(tfExpl.GetValue() * idfExpl.GetValue() * fieldNormExpl.GetValue());
			
			result.AddDetail(fieldExpl);
			System.Boolean? tempAux = fieldExpl.GetMatch();
			result.SetMatch(tempAux);
			
			// combine them
			result.SetValue(queryExpl.GetValue() * fieldExpl.GetValue());
			
			if (queryExpl.GetValue() == 1.0f)
				return fieldExpl;
			
			return result;
		}
Example #36
		/// <summary> Returns sub IndexReader that contains the given document id.
		/// 
		/// </summary>
		/// <param name="doc">id of document
		/// </param>
		/// <param name="reader">parent reader
		/// </param>
		/// <returns> sub reader of parent which contains the specified doc id
		/// </returns>
		public static IndexReader SubReader(int doc, IndexReader reader)
		{
			System.Collections.ArrayList subReadersList = new System.Collections.ArrayList();
			ReaderUtil.GatherSubReaders(subReadersList, reader);
			IndexReader[] subReaders = (IndexReader[]) subReadersList.ToArray(typeof(IndexReader));
			int[] docStarts = new int[subReaders.Length];
			int maxDoc = 0;
			for (int i = 0; i < subReaders.Length; i++)
			{
				docStarts[i] = maxDoc;
				maxDoc += subReaders[i].MaxDoc();
			}
			return subReaders[ReaderUtil.SubIndex(doc, docStarts)];
		}
Example #37
		public override DocValues GetValues(IndexReader reader)
		{
			
			IndexReader[] subReaders = reader.GetSequentialSubReaders();
			if (subReaders != null)
			{
				// This is a composite reader
				return new MultiDocValues(this, subReaders);
			}
			else
			{
				// Already an atomic reader -- just delegate
				return other.GetValues(reader);
			}
		}
Example #38
		/// <summary> Gathers sub-readers from reader into a List.
		/// 
		/// </summary>
		/// <param name="allSubReaders">
		/// </param>
		/// <param name="reader">
		/// </param>
		public static void  GatherSubReaders(System.Collections.IList allSubReaders, IndexReader reader)
		{
			IndexReader[] subReaders = reader.GetSequentialSubReaders();
			if (subReaders == null)
			{
				// Add the reader itself, and do not recurse
				allSubReaders.Add(reader);
			}
			else
			{
				for (int i = 0; i < subReaders.Length; i++)
				{
					GatherSubReaders(allSubReaders, subReaders[i]);
				}
			}
		}
Example #39
		public override SpanFilterResult BitSpans(IndexReader reader)
		{
			
			OpenBitSet bits = new OpenBitSet(reader.MaxDoc());
			Mono.Lucene.Net.Search.Spans.Spans spans = query.GetSpans(reader);
			System.Collections.IList tmp = new System.Collections.ArrayList(20);
			int currentDoc = - 1;
			SpanFilterResult.PositionInfo currentInfo = null;
			while (spans.Next())
			{
				int doc = spans.Doc();
				bits.Set(doc);
				if (currentDoc != doc)
				{
					currentInfo = new SpanFilterResult.PositionInfo(doc);
					tmp.Add(currentInfo);
					currentDoc = doc;
				}
				currentInfo.AddPosition(spans.Start(), spans.End());
			}
			return new SpanFilterResult(bits, tmp);
		}
Example #40
		/// <summary> Creates a new <code>WildcardTermEnum</code>.
		/// <p/>
		/// After calling the constructor the enumeration is already pointing to the first 
		/// valid term if such a term exists.
		/// </summary>
		public WildcardTermEnum(IndexReader reader, Term term):base()
		{
			searchTerm = term;
			field = searchTerm.Field();
			System.String searchTermText = searchTerm.Text();
			
			int sidx = searchTermText.IndexOf((System.Char) WILDCARD_STRING);
			int cidx = searchTermText.IndexOf((System.Char) WILDCARD_CHAR);
			int idx = sidx;
			if (idx == - 1)
			{
				idx = cidx;
			}
			else if (cidx >= 0)
			{
				idx = System.Math.Min(idx, cidx);
			}
			pre = idx != -1 ? searchTerm.Text().Substring(0, idx) : "";
			
			preLen = pre.Length;
			text = searchTermText.Substring(preLen);
			SetEnum(reader.Terms(new Term(searchTerm.Field(), pre)));
		}
Example #41
		/// <summary> Returns a comparator for sorting hits according to values in the given field.
		/// The terms in the field are looked at to determine whether they contain integers,
		/// floats or strings.  Once the type is determined, one of the other static methods
		/// in this class is called to get the comparator.
		/// </summary>
		/// <param name="reader"> Index to use.
		/// </param>
		/// <param name="fieldname"> Fieldable containing values.
		/// </param>
		/// <returns>  Comparator for sorting hits.
		/// </returns>
		/// <throws>  IOException If an error occurs reading the index. </throws>
		internal static ScoreDocComparator ComparatorAuto(IndexReader reader, System.String fieldname)
		{
			System.String field = String.Intern(fieldname);
			System.Object lookupArray = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetAuto(reader, field);
			if (lookupArray is Mono.Lucene.Net.Search.StringIndex)
			{
				return comparatorString(reader, field);
			}
			else if (lookupArray is int[])
			{
				return comparatorInt(reader, field, null);
			}
			else if (lookupArray is long[])
			{
				return comparatorLong(reader, field, null);
			}
			else if (lookupArray is float[])
			{
				return comparatorFloat(reader, field, null);
			}
			else if (lookupArray is System.String[])
			{
				return comparatorString(reader, field);
			}
			else
			{
				throw new System.SystemException("unknown data type in field '" + field + "'");
			}
		}
Example #42
		/// <summary> Returns a comparator for sorting hits according to a field containing strings.</summary>
		/// <param name="reader"> Index to use.
		/// </param>
		/// <param name="fieldname"> Fieldable containing string values.
		/// </param>
		/// <returns>  Comparator for sorting hits.
		/// </returns>
		/// <throws>  IOException If an error occurs reading the index. </throws>
		internal static ScoreDocComparator comparatorStringLocale(IndexReader reader, System.String fieldname, System.Globalization.CultureInfo locale)
		{
			System.Globalization.CompareInfo collator = locale.CompareInfo;
			System.String field = String.Intern(fieldname);
			System.String[] index = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetStrings(reader, field);
			return new AnonymousClassScoreDocComparator7(index, collator);
		}
Example #43
		public override Spans GetSpans(IndexReader reader)
		{
			if (clauses.Count == 1)
			// optimize 1-clause case
				return ((SpanQuery) clauses[0]).GetSpans(reader);
			
			return new AnonymousClassSpans(reader, this);
		}
Example #44
		public override Spans GetSpans(IndexReader reader)
		{
			return new AnonymousClassSpans(reader, this);
		}
Example #45
		public override Query Rewrite(IndexReader reader)
		{
			if (!termLongEnough)
			{
				// can only match if it's exact
				return new TermQuery(term);
			}
			
			FilteredTermEnum enumerator = GetEnum(reader);
			int maxClauseCount = BooleanQuery.GetMaxClauseCount();
			ScoreTermQueue stQueue = new ScoreTermQueue(maxClauseCount);
			ScoreTerm reusableST = null;
			
			try
			{
				do 
				{
					float score = 0.0f;
					Term t = enumerator.Term();
					if (t != null)
					{
						score = enumerator.Difference();
						if (reusableST == null)
						{
							reusableST = new ScoreTerm(t, score);
						}
						else if (score >= reusableST.score)
						{
							// reusableST holds the last "rejected" entry, so, if
							// this new score is not better than that, there's no
							// need to try inserting it
							reusableST.score = score;
							reusableST.term = t;
						}
						else
						{
							continue;
						}
						
						reusableST = (ScoreTerm) stQueue.InsertWithOverflow(reusableST);
					}
				}
				while (enumerator.Next());
			}
			finally
			{
				enumerator.Close();
			}
			
			BooleanQuery query = new BooleanQuery(true);
			int size = stQueue.Size();
			for (int i = 0; i < size; i++)
			{
				ScoreTerm st = (ScoreTerm) stQueue.Pop();
				TermQuery tq = new TermQuery(st.term); // found a match
				tq.SetBoost(GetBoost() * st.score); // set the boost
				query.Add(tq, BooleanClause.Occur.SHOULD); // add to query
			}
			
			return query;
		}
Example #46
		/// <summary>Optimize our representation and our subqueries' representations.</summary>
		/// <param name="reader">the IndexReader we query
		/// </param>
		/// <returns> an optimized copy of us (which may not be a copy if there is nothing to optimize) 
		/// </returns>
		public override Query Rewrite(IndexReader reader)
		{
			int numDisjunctions = disjuncts.Count;
			if (numDisjunctions == 1)
			{
				Query singleton = (Query) disjuncts[0];
				Query result = singleton.Rewrite(reader);
				if (GetBoost() != 1.0f)
				{
					if (result == singleton)
						result = (Query) result.Clone();
					result.SetBoost(GetBoost() * result.GetBoost());
				}
				return result;
			}
			DisjunctionMaxQuery clone = null;
			for (int i = 0; i < numDisjunctions; i++)
			{
				Query clause = (Query) disjuncts[i];
				Query rewrite = clause.Rewrite(reader);
				if (rewrite != clause)
				{
					if (clone == null)
						clone = (DisjunctionMaxQuery) this.Clone();
					clone.disjuncts[i] = rewrite;
				}
			}
			if (clone != null)
				return clone;
			else
				return this;
		}
Example #47
			/* Create the scorer used to score our associated DisjunctionMaxQuery */
			public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
			{
				Scorer[] scorers = new Scorer[weights.Count];
				int idx = 0;
				for (System.Collections.IEnumerator iter = weights.GetEnumerator(); iter.MoveNext(); )
				{
					Weight w = (Weight) iter.Current;
					Scorer subScorer = w.Scorer(reader, true, false);
					if (subScorer != null && subScorer.NextDoc() != DocIdSetIterator.NO_MORE_DOCS)
					{
						scorers[idx++] = subScorer;
					}
				}
				if (idx == 0)
					return null; // none of the sub-scorers had any matching documents
				DisjunctionMaxScorer result = new DisjunctionMaxScorer(Enclosing_Instance.tieBreakerMultiplier, similarity, scorers, idx);
				return result;
			}
Example #48
		/// <summary> Creates a hit queue sorted by the given list of fields.</summary>
		/// <param name="reader"> Index to use.
		/// </param>
		/// <param name="fields">Fieldable names, in priority order (highest priority first).  Cannot be <code>null</code> or empty.
		/// </param>
		/// <param name="size"> The number of hits to retain.  Must be greater than zero.
		/// </param>
		/// <throws>  IOException </throws>
		public FieldSortedHitQueue(IndexReader reader, SortField[] fields, int size)
		{
			int n = fields.Length;
			comparators = new ScoreDocComparator[n];
			this.fields = new SortField[n];
			for (int i = 0; i < n; ++i)
			{
				System.String fieldname = fields[i].GetField();
				comparators[i] = GetCachedComparator(reader, fieldname, fields[i].GetType(), fields[i].GetParser(), fields[i].GetLocale(), fields[i].GetFactory());
				// new SortField instances must only be created when auto-detection is in use
				if (fields[i].GetType() == SortField.AUTO)
				{
					if (comparators[i].SortType() == SortField.STRING)
					{
						this.fields[i] = new SortField(fieldname, fields[i].GetLocale(), fields[i].GetReverse());
					}
					else
					{
						this.fields[i] = new SortField(fieldname, comparators[i].SortType(), fields[i].GetReverse());
					}
				}
				else
				{
					System.Diagnostics.Debug.Assert(comparators [i].SortType() == fields [i].GetType());
					this.fields[i] = fields[i];
				}
			}
			Initialize(size);
		}
Example #49
		/// <param name="reader">that contains doc with payloads to extract
		/// </param>
		public PayloadSpanUtil(IndexReader reader)
		{
			this.reader = reader;
		}
Example #50
			public override void  SetNextReader(IndexReader reader, int docBase)
			{
				collector.SetNextReader(reader, start + docBase);
			}
Example #51
		// inherit javadocs
		public virtual ScoreDocComparator NewComparator(IndexReader reader, System.String fieldname)
		{
			System.String field = String.Intern(fieldname);
			System.IComparable[] cachedValues = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetCustom(reader, field, this);
			
			return new AnonymousClassScoreDocComparator(cachedValues, this);
		}
Example #52
			protected internal override System.Object CreateValue(IndexReader reader, FieldCacheImpl.Entry entryKey)
			{
				FieldCacheImpl.Entry entry = (FieldCacheImpl.Entry) entryKey;
				System.String fieldname = entry.field;
				int type = entry.type;
				System.Globalization.CultureInfo locale = entry.locale;
				Mono.Lucene.Net.Search.Parser parser = null;
				SortComparatorSource factory = null;
				if (entry.custom is SortComparatorSource)
				{
					factory = (SortComparatorSource) entry.custom;
				}
				else
				{
					parser = (Mono.Lucene.Net.Search.Parser) entry.custom;
				}
				ScoreDocComparator comparator;
				switch (type)
				{
					
					case SortField.AUTO: 
						comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.ComparatorAuto(reader, fieldname);
						break;
					
					case SortField.INT: 
						comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.comparatorInt(reader, fieldname, (Mono.Lucene.Net.Search.IntParser) parser);
						break;
					
					case SortField.FLOAT: 
						comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.comparatorFloat(reader, fieldname, (Mono.Lucene.Net.Search.FloatParser) parser);
						break;
					
					case SortField.LONG: 
						comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.comparatorLong(reader, fieldname, (Mono.Lucene.Net.Search.LongParser) parser);
						break;
					
					case SortField.DOUBLE: 
						comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.comparatorDouble(reader, fieldname, (Mono.Lucene.Net.Search.DoubleParser) parser);
						break;
					
					case SortField.SHORT: 
						comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.comparatorShort(reader, fieldname, (Mono.Lucene.Net.Search.ShortParser) parser);
						break;
					
					case SortField.BYTE: 
						comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.comparatorByte(reader, fieldname, (Mono.Lucene.Net.Search.ByteParser) parser);
						break;
					
					case SortField.STRING: 
						if (locale != null)
							comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.comparatorStringLocale(reader, fieldname, locale);
						else
							comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.comparatorString(reader, fieldname);
						break;
					
					case SortField.CUSTOM: 
						comparator = factory.NewComparator(reader, fieldname);
						break;
					
					default: 
						throw new System.SystemException("unknown field type: " + type);
					
				}
				return comparator;
			}
Example #53
			/* Explain the score we computed for doc */
			public override Explanation Explain(IndexReader reader, int doc)
			{
				if (Enclosing_Instance.disjuncts.Count == 1)
					return ((Weight) weights[0]).Explain(reader, doc);
				ComplexExplanation result = new ComplexExplanation();
				float max = 0.0f, sum = 0.0f;
				result.SetDescription(Enclosing_Instance.tieBreakerMultiplier == 0.0f ? "max of:" : "max plus " + Enclosing_Instance.tieBreakerMultiplier + " times others of:");
				for (System.Collections.IEnumerator iter = weights.GetEnumerator(); iter.MoveNext(); )
				{
					Explanation e = ((Weight) iter.Current).Explain(reader, doc);
					if (e.IsMatch())
					{
						System.Boolean tempAux = true;
						result.SetMatch(tempAux);
						result.AddDetail(e);
						sum += e.GetValue();
						max = System.Math.Max(max, e.GetValue());
					}
				}
				result.SetValue(max + (sum - max) * Enclosing_Instance.tieBreakerMultiplier);
				return result;
			}
Example #54
		internal static ScoreDocComparator GetCachedComparator(IndexReader reader, System.String field, int type, Mono.Lucene.Net.Search.Parser parser, System.Globalization.CultureInfo locale, SortComparatorSource factory)
		{
			if (type == SortField.DOC)
				return Mono.Lucene.Net.Search.ScoreDocComparator_Fields.INDEXORDER;
			if (type == SortField.SCORE)
				return Mono.Lucene.Net.Search.ScoreDocComparator_Fields.RELEVANCE;
			FieldCacheImpl.Entry entry = (factory != null)
				? new FieldCacheImpl.Entry(field, factory)
				: ((parser != null)
					? new FieldCacheImpl.Entry(field, type, parser)
					: new FieldCacheImpl.Entry(field, type, locale));
			return (ScoreDocComparator) Comparators.Get(reader, entry);
		}
Example #55
		public /*protected internal*/ override FilteredTermEnum GetEnum(IndexReader reader)
		{
			return new FuzzyTermEnum(reader, GetTerm(), minimumSimilarity, prefixLength);
		}
Example #56
		/// <summary> Returns a comparator for sorting hits according to a field containing floats.</summary>
		/// <param name="reader"> Index to use.
		/// </param>
		/// <param name="fieldname"> Fieldable containing float values.
		/// </param>
		/// <returns>  Comparator for sorting hits.
		/// </returns>
		/// <throws>  IOException If an error occurs reading the index. </throws>
		internal static ScoreDocComparator comparatorFloat(IndexReader reader, System.String fieldname, Mono.Lucene.Net.Search.FloatParser parser)
		{
			System.String field = String.Intern(fieldname);
			float[] fieldOrder = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetFloats(reader, field, parser);
			return new AnonymousClassScoreDocComparator4(fieldOrder);
		}
Example #57
			public override void  SetNextReader(IndexReader reader, int docBase)
			{
				// not needed by this implementation
			}
Example #58
		/// <summary> Returns a comparator for sorting hits according to a field containing doubles.</summary>
		/// <param name="reader"> Index to use.
		/// </param>
		/// <param name="fieldname"> Fieldable containing float values.
		/// </param>
		/// <returns>  Comparator for sorting hits.
		/// </returns>
		/// <throws>  IOException If an error occurs reading the index. </throws>
		internal static ScoreDocComparator comparatorDouble(IndexReader reader, System.String fieldname, Mono.Lucene.Net.Search.DoubleParser parser)
		{
			System.String field = String.Intern(fieldname);
			double[] fieldOrder = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetDoubles(reader, field, parser);
			return new AnonymousClassScoreDocComparator5(fieldOrder);
		}
Example #59
		public override Query Rewrite(IndexReader reader)
		{
			SpanFirstQuery clone = null;
			
			SpanQuery rewritten = (SpanQuery) match.Rewrite(reader);
			if (rewritten != match)
			{
				clone = (SpanFirstQuery) this.Clone();
				clone.match = rewritten;
			}
			
			if (clone != null)
			{
				return clone; // some clauses rewrote
			}
			else
			{
				return this; // no clauses rewrote
			}
		}
Example #60
		/// <summary> Returns a comparator for sorting hits according to a field containing strings.</summary>
		/// <param name="reader"> Index to use.
		/// </param>
		/// <param name="fieldname"> Fieldable containing string values.
		/// </param>
		/// <returns>  Comparator for sorting hits.
		/// </returns>
		/// <throws>  IOException If an error occurs reading the index. </throws>
		internal static ScoreDocComparator comparatorString(IndexReader reader, System.String fieldname)
		{
			System.String field = String.Intern(fieldname);
			Mono.Lucene.Net.Search.StringIndex index = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetStringIndex(reader, field);
			return new AnonymousClassScoreDocComparator6(index);
		}