// Collector callback invoked when the search moves to a new leaf reader.
// Before switching, verifies that advancing the PREVIOUS leaf's scorer past
// its last collected doc returns NO_MORE_DOCS, then resets per-leaf state.
public virtual void SetNextReader(AtomicReaderContext context)
{
    // confirm that skipping beyond the last doc, on the
    // previous reader, hits NO_MORE_DOCS
    if (lastReader[0] != null)
    {
        AtomicReader previousReader = lastReader[0];
        IndexSearcher indexSearcher = LuceneTestCase.NewSearcher(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
            luceneTestCase,
#endif
            previousReader);
        // Mirror the outer searcher's similarity so scoring matches.
        indexSearcher.Similarity = s.Similarity;
        Weight w = indexSearcher.CreateNormalizedWeight(q);
        Scorer scorer = w.GetScorer((AtomicReaderContext)indexSearcher.TopReaderContext, previousReader.LiveDocs);
        if (scorer != null)
        {
            // Advancing past the last seen doc must exhaust the scorer.
            bool more = scorer.Advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
            Assert.IsFalse(more, "query's last doc was " + lastDoc[0] + " but skipTo(" + (lastDoc[0] + 1) + ") got to " + scorer.DocID);
        }
        leafPtr++;
    }
    // Reset tracking state for the new leaf.
    lastReader[0] = (AtomicReader)context.Reader;
    lastDoc[0] = -1;
    liveDocs = ((AtomicReader)context.Reader).LiveDocs;
}
/// <summary>
/// Given an <see cref="IndexSearcher"/>, returns a new <see cref="IndexSearcher"/> whose <see cref="IndexReader"/>
/// is a <see cref="MultiReader"/> containing the <see cref="IndexReader"/> of the original <see cref="IndexSearcher"/>,
/// as well as several "empty" <see cref="IndexReader"/>s -- some of which will have
/// deleted documents in them. This new <see cref="IndexSearcher"/> should
/// behave exactly the same as the original <see cref="IndexSearcher"/>. </summary>
/// <param name="luceneTestCase">The current test instance.</param>
/// <param name="s"> The searcher to wrap. </param>
/// <param name="edge"> If negative, s will be the first sub; if 0, s will be in the middle, if positive s will be the last sub. </param>
// LUCENENET specific
// Removes dependency on <see cref="LuceneTestCase.ClassEnv.Similarity"/>
public static IndexSearcher WrapUnderlyingReader(LuceneTestCase luceneTestCase, Random random, IndexSearcher s, int edge)
// NOTE(review): the matching #if (carrying an alternate signature) is outside this chunk.
#endif
{
    IndexReader r = s.IndexReader;

    // we can't put deleted docs before the nested reader, because
    // it will throw off the docIds
    IndexReader[] readers = new IndexReader[]
    {
        // edge < 0: the real reader leads; otherwise it is nested deeper below.
        edge < 0 ? r : emptyReaders[0],
        emptyReaders[0],
        new FCInvisibleMultiReader(edge < 0 ? emptyReaders[4] : emptyReaders[0],
            emptyReaders[0],
            0 == edge ? r : emptyReaders[0]),
        0 < edge ? emptyReaders[0] : emptyReaders[7],
        emptyReaders[0],
        new FCInvisibleMultiReader(0 < edge ? emptyReaders[0] : emptyReaders[5],
            emptyReaders[0],
            0 < edge ? r : emptyReaders[0])
    };

    IndexSearcher @out = LuceneTestCase.NewSearcher(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
        luceneTestCase,
#endif
        new FCInvisibleMultiReader(readers));
    // Preserve the wrapped searcher's similarity so scores are unchanged.
    @out.Similarity = s.Similarity;
    return(@out);
}
/// <summary>
/// Sets up a RAMDirectory, and adds documents (using English.IntToEnglish()) with two fields: field and multiField
/// and analyzes them using the PayloadAnalyzer </summary>
/// <param name="random">Randomness source for the <see cref="MockDirectoryWrapper"/>.</param>
/// <param name="similarity"> The Similarity class to use in the Searcher </param>
/// <param name="numDocs"> The num docs to add </param>
/// <returns> An IndexSearcher </returns>
// TODO: randomize
public virtual IndexSearcher SetUp(Random random, Similarity similarity, int numDocs)
{
    Directory directory = new MockDirectoryWrapper(random, new RAMDirectory());
    PayloadAnalyzer analyzer = new PayloadAnalyzer(this);

    // TODO randomize this
    IndexWriter writer = new IndexWriter(directory, (new IndexWriterConfig(LuceneTestCase.TEST_VERSION_CURRENT, analyzer)).SetSimilarity(similarity));
    // writer.infoStream = System.out;
    for (int i = 0; i < numDocs; i++)
    {
        // Each doc carries the spelled-out number in three fields so payload
        // and non-payload variants index the same text.
        Document doc = new Document();
        doc.Add(new TextField(FIELD, English.Int32ToEnglish(i), Field.Store.YES));
        doc.Add(new TextField(MULTI_FIELD, English.Int32ToEnglish(i) + " " + English.Int32ToEnglish(i), Field.Store.YES));
        doc.Add(new TextField(NO_PAYLOAD_FIELD, English.Int32ToEnglish(i), Field.Store.YES));
        writer.AddDocument(doc);
    }
    // Open a reader from the writer (second arg applies deletes) before disposing it.
    Reader = DirectoryReader.Open(writer, true);
    writer.Dispose();

    IndexSearcher searcher = LuceneTestCase.NewSearcher(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
        null, // LUCENENET: Passing null allows us to bypass similarity, which we are setting here, anyway
#endif
        Reader);
    searcher.Similarity = similarity;
    return(searcher);
}
/// <summary>
/// Check that first skip on just created scorers always goes to the right doc.
/// </summary>
/// <param name="q">The query to check.</param>
/// <param name="s">The searcher to run <paramref name="q"/> against.</param>
/// <param name="similarity">
/// LUCENENET specific
/// Removes dependency on <see cref="LuceneTestCase.ClassEnv.Similarity"/>
/// </param>
public static void CheckFirstSkipTo(Query q, IndexSearcher s, Similarity similarity)
{
    //System.out.println("checkFirstSkipTo: "+q);
    const float maxDiff = 1e-3f;
    // Single-element arrays act as mutable cells shared with the collector.
    int[] lastDoc = new int[] { -1 };
    AtomicReader[] lastReader = new AtomicReader[] { null };
    IList<AtomicReaderContext> context = s.TopReaderContext.Leaves;
    s.Search(q, new CollectorAnonymousInnerClassHelper2(q, s, maxDiff, lastDoc, lastReader, context, similarity));
    if (lastReader[0] != null)
    {
        // confirm that skipping beyond the last doc, on the
        // previous reader, hits NO_MORE_DOCS
        AtomicReader previousReader = lastReader[0];
        IndexSearcher indexSearcher = LuceneTestCase.NewSearcher(previousReader, similarity);
        indexSearcher.Similarity = s.Similarity;
        Weight w = indexSearcher.CreateNormalizedWeight(q);
        // LUCENENET: GetScorer(...) and the DocID property are the .NET API
        // names used by the sibling check methods (was w.Scorer/scorer.DocID()).
        Scorer scorer = w.GetScorer((AtomicReaderContext)indexSearcher.TopReaderContext, previousReader.LiveDocs);
        if (scorer != null)
        {
            bool more = scorer.Advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
            Assert.IsFalse(more, "query's last doc was " + lastDoc[0] + " but skipTo(" + (lastDoc[0] + 1) + ") got to " + scorer.DocID);
        }
    }
}
/// <summary>
/// Sets up a RAMDirectory, and adds documents (using English.IntToEnglish()) with two fields: field and multiField
/// and analyzes them using the PayloadAnalyzer </summary>
/// <param name="random">Randomness source for the <see cref="MockDirectoryWrapper"/>.</param>
/// <param name="similarity"> The Similarity class to use in the Searcher </param>
/// <param name="numDocs"> The num docs to add </param>
/// <returns> An IndexSearcher </returns>
// TODO: randomize
public virtual IndexSearcher SetUp(Random random, Similarity similarity, int numDocs)
{
    Directory directory = new MockDirectoryWrapper(random, new RAMDirectory());
    PayloadAnalyzer analyzer = new PayloadAnalyzer(this);

    // TODO randomize this
    IndexWriter writer = new IndexWriter(directory, (new IndexWriterConfig(LuceneTestCase.TEST_VERSION_CURRENT, analyzer)).SetSimilarity(similarity));
    // writer.infoStream = System.out;
    for (int i = 0; i < numDocs; i++)
    {
        Document doc = new Document();
        // LUCENENET: Int32ToEnglish is the .NET-conventional name, consistent
        // with the other SetUp overload in this file (was IntToEnglish).
        doc.Add(new TextField(FIELD, English.Int32ToEnglish(i), Field.Store.YES));
        doc.Add(new TextField(MULTI_FIELD, English.Int32ToEnglish(i) + " " + English.Int32ToEnglish(i), Field.Store.YES));
        doc.Add(new TextField(NO_PAYLOAD_FIELD, English.Int32ToEnglish(i), Field.Store.YES));
        writer.AddDocument(doc);
    }
    // Open a reader from the writer (second arg applies deletes) before disposing it.
    Reader = DirectoryReader.Open(writer, true);
    writer.Dispose();

    IndexSearcher searcher = LuceneTestCase.NewSearcher(Reader, similarity);
    searcher.Similarity = similarity;
    return(searcher);
}
// Collector callback invoked when the search moves to a new leaf reader.
// Verifies the PREVIOUS leaf's scorer is exhausted past its last collected
// doc, advances the leaf pointer, and resets per-leaf state.
public virtual void SetNextReader(AtomicReaderContext context)
{
    // confirm that skipping beyond the last doc, on the
    // previous reader, hits NO_MORE_DOCS
    if (lastReader[0] != null)
    {
        AtomicReader previousReader = lastReader[0];
        IndexSearcher indexSearcher = LuceneTestCase.NewSearcher(previousReader);
        // Mirror the outer searcher's similarity so scoring matches.
        indexSearcher.Similarity = s.Similarity;
        Weight w = indexSearcher.CreateNormalizedWeight(q);
        AtomicReaderContext ctx = (AtomicReaderContext)indexSearcher.TopReaderContext;
        Scorer scorer = w.GetScorer(ctx, ((AtomicReader)ctx.Reader).LiveDocs);
        if (scorer != null)
        {
            // Advancing past the last seen doc must exhaust the scorer.
            bool more = scorer.Advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
            Assert.IsFalse(more, "query's last doc was " + lastDoc[0] + " but skipTo(" + (lastDoc[0] + 1) + ") got to " + scorer.DocID);
        }
        leafPtr++;
    }
    lastReader[0] = (AtomicReader)context.Reader;
    if (Debugging.AssertsEnabled)
    {
        // Sanity check: the leaf we switched to matches the precomputed context array.
        Debugging.Assert(readerContextArray[leafPtr].Reader == context.Reader);
    }
    // Force a fresh scorer for the new leaf; reset last-doc tracking.
    this.scorer = null;
    lastDoc[0] = -1;
}
// Ignores the supplied reader and always builds a searcher over 'other' —
// presumably to cross-check results between two readers; TODO confirm intent.
public override IndexSearcher NewSearcher(IndexReader ignored)
{
    return(LuceneTestCase.NewSearcher(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
        outerInstance,
#endif
        other));
}
/// <summary>
/// Alternate scorer skipTo(),skipTo(),next(),next(),skipTo(),skipTo(), etc
/// and ensure a hitcollector receives same docs and scores.
/// </summary>
/// <param name="luceneTestCase">The current test instance.</param>
/// <param name="q">The query whose skip/next behavior is checked.</param>
/// <param name="s">The searcher to run the query against.</param>
// LUCENENET specific
// Removes dependency on <see cref="LuceneTestCase.ClassEnv.Similarity"/>
public static void CheckSkipTo(LuceneTestCase luceneTestCase, Query q, IndexSearcher s)
// NOTE(review): the matching #if (carrying an alternate signature) is outside this chunk.
#endif
{
    //System.out.println("Checking "+q);
    IList<AtomicReaderContext> readerContextArray = s.TopReaderContext.Leaves;
    if (s.CreateNormalizedWeight(q).ScoresDocsOutOfOrder) // in this case order of skipTo() might differ from that of next().
    {
        return;
    }

    // Opcodes for the interleaving patterns below.
    const int skip_op = 0;
    const int next_op = 1;
    // Each inner array is one interleaving of skip/next operations to exercise.
    int[][] orders = new int[][]
    {
        new int[] { next_op },
        new int[] { skip_op },
        new int[] { skip_op, next_op },
        new int[] { next_op, skip_op },
        new int[] { skip_op, skip_op, next_op, next_op },
        new int[] { next_op, next_op, skip_op, skip_op },
        new int[] { skip_op, skip_op, skip_op, next_op, next_op }
    };
    for (int k = 0; k < orders.Length; k++)
    {
        int[] order = orders[k];
        // System.out.print("Order:");for (int i = 0; i < order.Length; i++) System.out.print(order[i]==skip_op ? " skip()":" next()");
        // System.out.println();

        // Mutable cells shared with the anonymous collector.
        int[] opidx = new int[] { 0 };
        int[] lastDoc = new int[] { -1 };
        // FUTURE: ensure scorer.Doc()==-1
        const float maxDiff = 1e-5f;
        AtomicReader[] lastReader = new AtomicReader[] { null };

        s.Search(q, new CollectorAnonymousInnerClassHelper(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
            luceneTestCase,
#endif
            q, s, readerContextArray, skip_op, order, opidx, lastDoc, maxDiff, lastReader));

        if (lastReader[0] != null)
        {
            // confirm that skipping beyond the last doc, on the
            // previous reader, hits NO_MORE_DOCS
            AtomicReader previousReader = lastReader[0];
            IndexSearcher indexSearcher = LuceneTestCase.NewSearcher(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
                luceneTestCase,
#endif
                previousReader, false);
            indexSearcher.Similarity = s.Similarity;
            Weight w = indexSearcher.CreateNormalizedWeight(q);
            AtomicReaderContext ctx = (AtomicReaderContext)previousReader.Context;
            Scorer scorer = w.GetScorer(ctx, ((AtomicReader)ctx.Reader).LiveDocs);
            if (scorer != null)
            {
                bool more = scorer.Advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
                Assert.IsFalse(more, "query's last doc was " + lastDoc[0] + " but skipTo(" + (lastDoc[0] + 1) + ") got to " + scorer.DocID);
            }
        }
    }
}
/// <summary>
/// Stress work item: repeatedly opens a reader over the shared directory,
/// builds a searcher from it, and disposes the reader, then bumps the counter.
/// </summary>
public override void DoWork()
{
    for (int i = 0; i < 100; i++)
    {
        // using guarantees the reader is released even if NewSearcher throws;
        // the original disposed it manually and would leak on an exception.
        using (IndexReader ir = DirectoryReader.Open(Directory))
        {
            // The searcher itself is unused here; the loop only exercises
            // open/searcher-construction/close.
            IndexSearcher @is = OuterInstance.NewSearcher(ir);
        }
    }
    Count += 100;
}
/// <summary>
/// Given an IndexSearcher, returns a new IndexSearcher whose IndexReader
/// is a MultiReader containing the Reader of the original IndexSearcher,
/// as well as several "empty" IndexReaders -- some of which will have
/// deleted documents in them. This new IndexSearcher should
/// behave exactly the same as the original IndexSearcher. </summary>
/// <param name="s"> the searcher to wrap </param>
/// <param name="edge"> if negative, s will be the first sub; if 0, s will be in the middle, if positive s will be the last sub </param>
/// <param name="similarity">
/// LUCENENET specific
/// Removes dependency on <see cref="LuceneTestCase.ClassEnv.Similarity"/>
/// </param>
public static IndexSearcher WrapUnderlyingReader(Random random, IndexSearcher s, int edge, Similarity similarity)
{
    IndexReader r = s.IndexReader;

    // we can't put deleted docs before the nested reader, because
    // it will throw off the docIds
    IndexReader leading = edge < 0 ? r : EmptyReaders[0];
    IndexReader middleGroup = new FCInvisibleMultiReader(
        edge < 0 ? EmptyReaders[4] : EmptyReaders[0],
        EmptyReaders[0],
        0 == edge ? r : EmptyReaders[0]);
    IndexReader afterMiddle = 0 < edge ? EmptyReaders[0] : EmptyReaders[7];
    IndexReader trailingGroup = new FCInvisibleMultiReader(
        0 < edge ? EmptyReaders[0] : EmptyReaders[5],
        EmptyReaders[0],
        0 < edge ? r : EmptyReaders[0]);

    IndexReader[] readers =
    {
        leading,
        EmptyReaders[0],
        middleGroup,
        afterMiddle,
        EmptyReaders[0],
        trailingGroup
    };

    IndexSearcher wrapped = LuceneTestCase.NewSearcher(new FCInvisibleMultiReader(readers), similarity);
    // Preserve the wrapped searcher's similarity so scores are unchanged.
    wrapped.Similarity = s.Similarity;
    return wrapped;
}
// Stress work item: repeatedly opens a reader over the shared directory,
// builds a searcher from it, and disposes the reader, then bumps the counter.
public override void DoWork()
{
    for (int i = 0; i < 100; i++)
    {
        IndexReader ir = DirectoryReader.Open(directory);
        // The searcher itself is unused here; the loop only exercises
        // open/searcher-construction/close.
        IndexSearcher @is =
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
            outerInstance.
#endif
            NewSearcher(ir);
        ir.Dispose();
    }
    count += 100;
}
// Ignores the supplied reader and always builds a searcher over 'Other'.
public override IndexSearcher NewSearcher(IndexReader ignored)
    => LuceneTestCase.NewSearcher(Other);