Code example #1
0
 public override void Search(Weight weight, Filter filter, Collector collector)
 {
     // Visit the index segments in order. For each one, tell the collector
     // which sub-reader (and doc-id base offset) the following hits belong
     // to, then score that segment either directly or through the filter.
     for (int i = 0; i < subReaders.Length; i++)
     {
         collector.SetNextReader(subReaders[i], docStarts[i]);
         if (filter == null)
         {
             // Unfiltered path: request an in-order scorer only when the
             // collector cannot accept out-of-order document IDs.
             Scorer scorer = weight.Scorer(subReaders[i], !collector.AcceptsDocsOutOfOrder(), true);
             if (scorer != null)
             {
                 scorer.Score(collector);
             }
         }
         else
         {
             // Filtered path: delegate to the filter-aware search routine.
             SearchWithFilter(subReaders[i], weight, filter, collector);
         }
     }
 }
Code example #2
0
File: IndexSearcher.cs  Project: freemsly/lucenenet
 /// <summary>
 /// Lower-level search API.
 ///
 /// <para><seealso cref="Collector#collect(int)"/> is called for every document.</para>
 ///
 /// <para>NOTE: this method executes the searches on all given leaves exclusively.
 /// To search across all the searchers leaves use <seealso cref="#leafContexts"/>.</para>
 /// </summary>
 /// <param name="leaves">
 ///          the searchers leaves to execute the searches on </param>
 /// <param name="weight">
 ///          to match documents </param>
 /// <param name="collector">
 ///          to receive hits </param>
 /// <exception cref="BooleanQuery.TooManyClauses"> If a query would exceed
 ///         <seealso cref="BooleanQuery#getMaxClauseCount()"/> clauses. </exception>
 protected internal virtual void Search(IList<AtomicReaderContext> leaves, Weight weight, Collector collector)
 {
     // TODO: should we make this threaded...? the Collector could be sync'd?
     // For now the leaves are always searched sequentially on one thread.
     foreach (AtomicReaderContext leaf in leaves)
     {
         try
         {
             collector.NextReader = leaf;
         }
         catch (CollectionTerminatedException)
         {
             // The collector signalled there is no doc of interest in this
             // reader context; skip ahead to the next leaf.
             continue;
         }

         BulkScorer scorer = weight.BulkScorer(leaf, !collector.AcceptsDocsOutOfOrder(), leaf.AtomicReader.LiveDocs);
         if (scorer == null)
         {
             continue;
         }

         try
         {
             scorer.Score(collector);
         }
         catch (CollectionTerminatedException)
         {
             // Collection was terminated prematurely for this leaf;
             // carry on with the following one.
         }
     }
 }
Code example #3
0
 internal RandomOrderCollector(Random random, Collector @in)
 {
     // FIX: the scraped text `[email protected](...)` is email-obfuscation
     // garbling of `!@in.AcceptsDocsOutOfOrder(...)` and does not compile.
     // The wrapped collector must accept out-of-order doc IDs, because this
     // collector deliberately flushes its buffered hits in a shuffled order.
     if (!@in.AcceptsDocsOutOfOrder())
     {
         throw new System.ArgumentException();
     }
     this.@in = @in;
     this.Random = random;
     // Buffer between 1 and 100 hits before replaying them out of order.
     BufferSize = 1 + random.Next(100);
     DocIDs = new int[BufferSize];
     Scores = new float[BufferSize];
     Freqs = new int[BufferSize];
     Buffered = 0;
 }
Code example #4
0
 internal RandomOrderCollector(Random random, Collector @in)
 {
     // FIX: `[email protected](...)` in the scraped source is
     // email-obfuscation garbling of `!@in.AcceptsDocsOutOfOrder(...)`
     // and does not compile. The wrapped collector must accept
     // out-of-order doc IDs since buffered hits are replayed shuffled.
     if (!@in.AcceptsDocsOutOfOrder())
     {
         throw new System.ArgumentException();
     }
     this.@in    = @in;
     this.Random = random;
     // Buffer between 1 and 100 hits before flushing in random order.
     BufferSize  = 1 + random.Next(100);
     DocIDs      = new int[BufferSize];
     Scores      = new float[BufferSize];
     Freqs       = new int[BufferSize];
     Buffered    = 0;
 }
Code example #5
0
File: CachingCollector.cs  Project: zfxsss/lucenenet
        /// <summary>
        /// Common validation and bookkeeping before replaying the cache into
        /// another collector; reused by the specialized inner classes. </summary>
        internal virtual void ReplayInit(Collector other)
        {
            if (!Cached)
            {
                throw new InvalidOperationException("cannot replay: cache was cleared because too much RAM was required");
            }

            // Replaying cached out-of-order hits into an in-order-only
            // collector would violate its contract, so reject that pairing.
            bool targetAcceptsOutOfOrder = other.AcceptsDocsOutOfOrder();
            bool wrappedAcceptsOutOfOrder = this.Other.AcceptsDocsOutOfOrder();
            if (wrappedAcceptsOutOfOrder && !targetAcceptsOutOfOrder)
            {
                throw new System.ArgumentException("cannot replay: given collector does not support " + "out-of-order collection, while the wrapped collector does. " + "Therefore cached documents may be out-of-order.");
            }

            //System.out.println("CC: replay totHits=" + (upto + base));
            // Close out the still-open segment so its doc-id range is recorded.
            if (LastReaderContext != null)
            {
                CachedSegs.Add(new SegStart(LastReaderContext, @base + Upto));
                LastReaderContext = null;
            }
        }
Code example #6
0
        public virtual void TestCollector()
        {
            // Verifies that MultiCollector forwards every call
            // (AcceptsDocsOutOfOrder, Collect, NextReader, Scorer)
            // to each of the collectors it wraps.
            DummyCollector[] wrapped = new DummyCollector[] { new DummyCollector(), new DummyCollector() };
            Collector multi = MultiCollector.Wrap(wrapped);

            Assert.IsTrue(multi.AcceptsDocsOutOfOrder());
            multi.Collect(1);
            multi.NextReader = null;
            multi.Scorer = null;

            foreach (DummyCollector inner in wrapped)
            {
                Assert.IsTrue(inner.AcceptsDocsOutOfOrderCalled);
                Assert.IsTrue(inner.CollectCalled);
                Assert.IsTrue(inner.SetNextReaderCalled);
                Assert.IsTrue(inner.SetScorerCalled);
            }
        }
Code example #7
0
        public virtual void TestNullCollectors()
        {
            // Wrapping only null collectors must be rejected outright.
            try
            {
                MultiCollector.Wrap(null, null);
                Assert.Fail("only null collectors should not be supported");
            }
            // FIX: drop the unused exception variable `e` (compiler
            // warning CS0168); only the exception type matters here.
            catch (System.ArgumentException)
            {
                // expected
            }

            // A mix of real and null collectors must be tolerated: nulls are
            // skipped and calls are forwarded to the real collectors without
            // throwing a NullReferenceException.
            Collector c = MultiCollector.Wrap(new DummyCollector(), null, new DummyCollector());

            Assert.IsTrue(c is MultiCollector);
            Assert.IsTrue(c.AcceptsDocsOutOfOrder());
            c.Collect(1);
            c.NextReader = null;
            c.Scorer     = null;
        }
Code example #8
0
 public override bool AcceptsDocsOutOfOrder()
 {
     // Delegate the ordering contract to the wrapped collector.
     return Other.AcceptsDocsOutOfOrder();
 }
Code example #9
0
File: IndexSearcher.cs  Project: sinsay/SSE
 public override void Search(Weight weight, Filter filter, Collector collector)
 {
     // Walk the sub-readers in index order; before scoring each segment,
     // announce its reader and doc-id base offset to the collector.
     for (int i = 0; i < subReaders.Length; i++)
     {
         collector.SetNextReader(subReaders[i], docStarts[i]);

         if (filter != null)
         {
             // Filtered path: the filter-aware routine drives collection.
             SearchWithFilter(subReaders[i], weight, filter, collector);
             continue;
         }

         // Unfiltered path: ask for an in-order scorer only when the
         // collector cannot handle out-of-order document IDs.
         Scorer scorer = weight.Scorer(subReaders[i], !collector.AcceptsDocsOutOfOrder(), true);
         if (scorer != null)
         {
             scorer.Score(collector);
         }
     }
 }
Code example #10
0
 public override bool AcceptsDocsOutOfOrder()
 {
     // Forward the question to the inner collector we delegate to.
     return collector.AcceptsDocsOutOfOrder();
 }
Code example #11
0
        /// <summary>
        /// Validates state and finalizes segment bookkeeping before a replay;
        /// reused by the specialized inner classes. </summary>
        internal virtual void ReplayInit(Collector other)
        {
            // Replay is only possible while the cache is intact.
            if (!Cached)
            {
                throw new InvalidOperationException("cannot replay: cache was cleared because too much RAM was required");
            }

            // If the wrapped collector produced out-of-order hits, the target
            // collector must be able to accept them.
            if (this.Other.AcceptsDocsOutOfOrder() && !other.AcceptsDocsOutOfOrder())
            {
                throw new System.ArgumentException("cannot replay: given collector does not support " + "out-of-order collection, while the wrapped collector does. " + "Therefore cached documents may be out-of-order.");
            }

            //System.out.println("CC: replay totHits=" + (upto + base));
            // Record the doc-id range of the segment that is still open.
            if (LastReaderContext != null)
            {
                CachedSegs.Add(new SegStart(LastReaderContext, @base + Upto));
                LastReaderContext = null;
            }
        }