コード例 #1 — ファイル: QueryValueSource.cs（プロジェクト: voquanghoa/YAFNET）
        /// <summary>
        /// Builds per-segment function values for <paramref name="vs"/>, reusing a
        /// <c>Weight</c> cached in <paramref name="fcontext"/> when one is present
        /// and creating (and caching) one otherwise.
        /// </summary>
        /// <param name="vs">The owning value source; supplies the query and default value.</param>
        /// <param name="readerContext">The atomic (segment) reader context to evaluate against.</param>
        /// <param name="fcontext">Shared function context used as a Weight/searcher cache; may be null.</param>
        public QueryDocValues(QueryValueSource vs, AtomicReaderContext readerContext, IDictionary fcontext)
            : base(vs)
        {
            this.readerContext = readerContext;
            this.acceptDocs    = readerContext.AtomicReader.LiveDocs;
            this.defVal        = vs.defVal;
            this.q             = vs.q;
            this.fcontext      = fcontext;

            // Reuse a Weight previously cached in the function context, keyed by the
            // value source itself.
            Weight w = fcontext == null ? null : (Weight)fcontext[vs];

            if (w == null)
            {
                IndexSearcher weightSearcher;
                if (fcontext == null)
                {
                    weightSearcher = new IndexSearcher(ReaderUtil.GetTopLevelContext(readerContext));
                }
                else
                {
                    // Prefer the searcher the caller stashed under "searcher"; fall back
                    // to a fresh searcher over the top-level reader context.
                    weightSearcher = (IndexSearcher)fcontext["searcher"];
                    if (weightSearcher == null)
                    {
                        weightSearcher = new IndexSearcher(ReaderUtil.GetTopLevelContext(readerContext));
                    }
                }
                // CreateWeight is expected to store the new Weight into fcontext under vs.
                vs.CreateWeight(fcontext, weightSearcher);
                // NOTE(review): if fcontext is null this indexer dereferences null and
                // throws NullReferenceException — presumably callers always supply a
                // non-null context on this path; confirm, or guard before indexing.
                w = (Weight)fcontext[vs];
            }
            weight = w;
        }
コード例 #2
        /// <summary>
        /// Returns per-segment function values backed by the field's binary doc
        /// values and a terms enumerator over the query field of the whole index.
        /// </summary>
        public override FunctionValues GetValues(IDictionary context, AtomicReaderContext readerContext)
        {
            // Per-segment term bytes for the indexed field (docsWithField not needed).
            BinaryDocValues fieldTerms = m_cache.GetTerms(readerContext.AtomicReader, m_field, false, PackedInt32s.FAST);

            // The query field's terms are resolved against the top-level reader.
            IndexReader topReader = ReaderUtil.GetTopLevelContext(readerContext).Reader;
            Terms queryTerms = MultiFields.GetTerms(topReader, m_qfield);

            TermsEnum termsEnum;
            if (queryTerms is null)
            {
                // Field absent from the index: iterate nothing.
                termsEnum = TermsEnum.EMPTY;
            }
            else
            {
                termsEnum = queryTerms.GetEnumerator();
            }

            return new Int32DocValuesAnonymousClass(this, this, fieldTerms, termsEnum);
        }
コード例 #3 — ファイル: OrdFieldSource.cs（プロジェクト: thevisad/YAFNET）
        // TODO: this is trappy? perhaps this query instead should make you pass a slow reader yourself?
        /// <summary>
        /// Returns ordinal-based function values for this segment, computed against
        /// a terms index built over the entire (slow-wrapped) top-level reader.
        /// </summary>
        public override FunctionValues GetValues(IDictionary context, AtomicReaderContext readerContext)
        {
            // Ordinals must be global, so wrap the composite top-level reader in an
            // atomic view before asking the field cache for the terms index.
            IndexReader topReader = ReaderUtil.GetTopLevelContext(readerContext).Reader;
            AtomicReader atomicView = SlowCompositeReaderWrapper.Wrap(topReader);
            SortedDocValues termsIndex = FieldCache.DEFAULT.GetTermsIndex(atomicView, m_field);

            // DocBase shifts this segment's doc ids into top-level doc id space.
            int docBase = readerContext.DocBase;
            return new Int32DocValuesAnonymousInnerClassHelper(this, this, docBase, termsIndex);
        }
コード例 #4
        /// <summary>
        /// Returns per-segment function values backed by the field's binary doc
        /// values and a terms enumerator over the query field of the whole index.
        /// </summary>
        public override FunctionValues GetValues(IDictionary context, AtomicReaderContext readerContext)
        {
            // Per-segment term bytes for the indexed field (docsWithField not needed).
            BinaryDocValues fieldTerms = cache.GetTerms(readerContext.AtomicReader, field, false, PackedInts.FAST);

            // The query field's terms are resolved against the top-level reader;
            // an absent field yields the empty enumerator.
            IndexReader topReader = ReaderUtil.GetTopLevelContext(readerContext).Reader;
            Terms queryTerms = MultiFields.GetTerms(topReader, qfield);
            TermsEnum termsEnum = queryTerms != null ? queryTerms.Iterator(null) : TermsEnum.EMPTY;

            return new IntDocValuesAnonymousInnerClassHelper(this, this, fieldTerms, termsEnum);
        }
コード例 #5
        // TODO: this is trappy? perhaps this query instead should make you pass a slow reader yourself?
        /// <summary>
        /// Returns reverse-ordinal function values for this segment, computed from a
        /// terms index built over the entire (slow-wrapped) top-level reader.
        /// </summary>
        public override FunctionValues GetValues(IDictionary context, AtomicReaderContext readerContext)
        {
            // DocBase shifts this segment's doc ids into top-level doc id space.
            int docBase = readerContext.DocBase;

            // Ordinals must be global, so wrap the composite top-level reader in an
            // atomic view before asking the field cache for the terms index.
            AtomicReader atomicView = SlowCompositeReaderWrapper.Wrap(ReaderUtil.GetTopLevelContext(readerContext).Reader);
            SortedDocValues termsIndex = FieldCache.DEFAULT.GetTermsIndex(atomicView, field);

            // ValueCount is the total number of distinct ordinals ("end" of the range).
            return new Int32DocValuesAnonymousClass(this, docBase, termsIndex, termsIndex.ValueCount);
        }
コード例 #6
            /// <summary>
            /// Creates a scorer for the term over the given segment, or null when the
            /// term does not occur in this segment.
            /// </summary>
            public override Scorer GetScorer(AtomicReaderContext context, IBits acceptDocs)
            {
                // The cached term states are only valid for the top-level reader they
                // were built against; catch cross-reader misuse in debug builds.
                Debug.Assert(termStates.TopReaderContext == ReaderUtil.GetTopLevelContext(context), "The top-reader used to create Weight (" + termStates.TopReaderContext + ") is not the same as the current reader's top-reader (" + ReaderUtil.GetTopLevelContext(context));

                TermsEnum termsEnum = GetTermsEnum(context);
                if (termsEnum == null)
                {
                    // Term absent from this segment: nothing to score.
                    return null;
                }

                DocsEnum docsEnum = termsEnum.Docs(acceptDocs, null);
                Debug.Assert(docsEnum != null);

                return new TermScorer(this, docsEnum, similarity.GetSimScorer(stats, context));
            }
コード例 #7
        /// <summary>
        /// Scans every segment of the top-level index for the global minimum and
        /// maximum of the wrapped source, caches the result in
        /// <paramref name="context"/> under this instance, and returns it.
        /// Non-finite values (±Inf, NaN) are excluded from the bounds.
        /// </summary>
        private ScaleInfo CreateScaleInfo(IDictionary context, AtomicReaderContext readerContext)
        {
            float minVal = float.PositiveInfinity;
            float maxVal = float.NegativeInfinity;

            // Walk every leaf of the top-level reader so the bounds are global,
            // not per-segment.
            foreach (AtomicReaderContext leaf in ReaderUtil.GetTopLevelContext(readerContext).Leaves)
            {
                FunctionValues vals = m_source.GetValues(context, leaf);
                int maxDoc = leaf.Reader.MaxDoc;

                for (int doc = 0; doc < maxDoc; doc++)
                {
                    float val = vals.SingleVal(doc);

                    // An all-ones exponent means +Inf, -Inf or NaN, none of which
                    // make sense to factor into the scale function.
                    if ((J2N.BitConversion.SingleToRawInt32Bits(val) & (0xff << 23)) == 0xff << 23)
                    {
                        continue;
                    }

                    if (val < minVal)
                    {
                        minVal = val;
                    }
                    if (val > maxVal)
                    {
                        maxVal = val;
                    }
                }
            }

            // minVal still +Inf means no finite value was seen (empty index):
            // collapse the range to zero.
            if (minVal == float.PositiveInfinity)
            {
                minVal = maxVal = 0;
            }

            var scaleInfo = new ScaleInfo { MinVal = minVal, MaxVal = maxVal };
            context[this] = scaleInfo;
            return scaleInfo;
        }
コード例 #8 — ファイル: NumDocsValueSource.cs（プロジェクト: thevisad/YAFNET）
 /// <summary>
 /// Returns the index-wide live document count as a constant per-document value.
 /// </summary>
 public override FunctionValues GetValues(IDictionary context, AtomicReaderContext readerContext)
 {
     // Searcher has no numdocs, so read it off the top-level reader instead.
     IndexReader topReader = ReaderUtil.GetTopLevelContext(readerContext).Reader;
     return new ConstInt32DocValues(topReader.NumDocs, this);
 }
コード例 #9
        /// <summary>
        /// Does all the "real work" of tallying up the counts. </summary>
        /// <remarks>
        /// For each set of matching docs, reads this segment's sorted-set doc values
        /// and accumulates per-ordinal hit counts into <c>counts</c>. When the index
        /// has multiple segments behind a <c>MultiSortedSetDocValues</c>, segment-local
        /// ordinals are remapped to global ordinals via the ordinal map; otherwise
        /// segment ordinals are used directly.
        /// </remarks>
        private void Count(IList <FacetsCollector.MatchingDocs> matchingDocs)
        {
            //System.out.println("ssdv count");

            MultiDocValues.OrdinalMap ordinalMap;

            // TODO: is this right?  really, we need a way to
            // verify that this ordinalMap "matches" the leaves in
            // matchingDocs...
            // Only use the ordinal map when counting across more than one segment;
            // a single segment's ords need no remapping.
            if (dv is MultiDocValues.MultiSortedSetDocValues && matchingDocs.Count > 1)
            {
                ordinalMap = ((MultiDocValues.MultiSortedSetDocValues)dv).Mapping;
            }
            else
            {
                ordinalMap = null;
            }

            IndexReader origReader = state.OrigReader;

            foreach (FacetsCollector.MatchingDocs hits in matchingDocs)
            {
                var reader = hits.context.AtomicReader;
                //System.out.println("  reader=" + reader);
                // LUCENE-5090: make sure the provided reader context "matches"
                // the top-level reader passed to the
                // SortedSetDocValuesReaderState, else cryptic
                // AIOOBE can happen:
                if (!Equals(ReaderUtil.GetTopLevelContext(hits.context).Reader, origReader))
                {
                    throw new ThreadStateException("the SortedSetDocValuesReaderState provided to this class does not match the reader being searched; you must create a new SortedSetDocValuesReaderState every time you open a new IndexReader");
                }

                // Segments with no values for this field contribute nothing.
                SortedSetDocValues segValues = reader.GetSortedSetDocValues(field);
                if (segValues == null)
                {
                    continue;
                }

                DocIdSetIterator docs = hits.bits.GetIterator();

                // TODO: yet another option is to count all segs
                // first, only in seg-ord space, and then do a
                // merge-sort-PQ in the end to only "resolve to
                // global" those seg ords that can compete, if we know
                // we just want top K?  ie, this is the same algo
                // that'd be used for merging facets across shards
                // (distributed faceting).  but this has much higher
                // temp ram req'ts (sum of number of ords across all
                // segs)
                if (ordinalMap != null)
                {
                    int segOrd = hits.context.Ord;

                    int numSegOrds = (int)segValues.ValueCount;

                    // Heuristic: with relatively few hits it is cheaper to remap each
                    // hit's ords to global ords on the fly; with many hits, count in
                    // seg-ord space first and migrate once at the end.
                    if (hits.totalHits < numSegOrds / 10)
                    {
                        //System.out.println("    remap as-we-go");
                        // Remap every ord to global ord as we iterate:
                        int doc;
                        while ((doc = docs.NextDoc()) != DocIdSetIterator.NO_MORE_DOCS)
                        {
                            //System.out.println("    doc=" + doc);
                            // Position the doc values on this hit, then drain its ords.
                            segValues.Document = doc;
                            int term = (int)segValues.NextOrd();
                            while (term != SortedSetDocValues.NO_MORE_ORDS)
                            {
                                //System.out.println("      segOrd=" + segOrd + " ord=" + term + " globalOrd=" + ordinalMap.getGlobalOrd(segOrd, term));
                                counts[(int)ordinalMap.GetGlobalOrd(segOrd, term)]++;
                                term = (int)segValues.NextOrd();
                            }
                        }
                    }
                    else
                    {
                        //System.out.println("    count in seg ord first");

                        // First count in seg-ord space:
                        int[] segCounts = new int[numSegOrds];
                        int   doc;
                        while ((doc = docs.NextDoc()) != DocIdSetIterator.NO_MORE_DOCS)
                        {
                            //System.out.println("    doc=" + doc);
                            segValues.Document = doc;
                            int term = (int)segValues.NextOrd();
                            while (term != SortedSetDocValues.NO_MORE_ORDS)
                            {
                                //System.out.println("      ord=" + term);
                                segCounts[term]++;
                                term = (int)segValues.NextOrd();
                            }
                        }

                        // Then, migrate to global ords:
                        for (int ord = 0; ord < numSegOrds; ord++)
                        {
                            int count = segCounts[ord];
                            if (count != 0)
                            {
                                //System.out.println("    migrate segOrd=" + segOrd + " ord=" + ord + " globalOrd=" + ordinalMap.getGlobalOrd(segOrd, ord));
                                counts[(int)ordinalMap.GetGlobalOrd(segOrd, ord)] += count;
                            }
                        }
                    }
                }
                else
                {
                    // No ord mapping (e.g., single segment index):
                    // just aggregate directly into counts:
                    int doc;
                    while ((doc = docs.NextDoc()) != DocIdSetIterator.NO_MORE_DOCS)
                    {
                        segValues.Document = doc;
                        int term = (int)segValues.NextOrd();
                        while (term != SortedSetDocValues.NO_MORE_ORDS)
                        {
                            counts[term]++;
                            term = (int)segValues.NextOrd();
                        }
                    }
                }
            }
        }