/// <summary>
/// Collector callback fired when the search moves to a new atomic sub-reader.
/// Before switching, re-creates a scorer over the PREVIOUS reader and verifies
/// that advancing past its last collected doc yields NO_MORE_DOCS, then resets
/// the per-reader tracking state (last reader, last doc, live docs).
/// </summary>
public virtual void SetNextReader(AtomicReaderContext context)
{
    // confirm that skipping beyond the last doc, on the
    // previous reader, hits NO_MORE_DOCS
    if (lastReader[0] != null)
    {
        AtomicReader previousReader = lastReader[0];
        // Private searcher over just the previous reader, so the scorer can be
        // rebuilt and advanced independently of the outer search in progress.
        IndexSearcher indexSearcher = LuceneTestCase.NewSearcher(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
            luceneTestCase,
#endif
            previousReader);
        indexSearcher.Similarity = s.Similarity; // keep scoring consistent with the outer searcher
        Weight w = indexSearcher.CreateNormalizedWeight(q);
        Scorer scorer = w.GetScorer((AtomicReaderContext)indexSearcher.TopReaderContext, previousReader.LiveDocs);
        if (scorer != null) // null when the query matches nothing in that segment
        {
            bool more = scorer.Advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
            Assert.IsFalse(more, "query's last doc was " + lastDoc[0] + " but skipTo(" + (lastDoc[0] + 1) + ") got to " + scorer.DocID);
        }
        leafPtr++; // advance to the next leaf slot
    }
    // Reset tracking for the reader we are switching to.
    lastReader[0] = (AtomicReader)context.Reader;
    lastDoc[0] = -1;
    liveDocs = ((AtomicReader)context.Reader).LiveDocs;
}
/// <summary>
/// Collector callback fired when the search moves to a new atomic sub-reader.
/// Verifies that advancing a fresh scorer over the PREVIOUS reader beyond its
/// last collected doc returns NO_MORE_DOCS, then resets per-reader state and
/// sanity-checks that the leaf pointer lines up with the incoming context.
/// </summary>
public virtual void SetNextReader(AtomicReaderContext context)
{
    // confirm that skipping beyond the last doc, on the
    // previous reader, hits NO_MORE_DOCS
    if (lastReader[0] != null)
    {
        AtomicReader previousReader = lastReader[0];
        IndexSearcher indexSearcher = LuceneTestCase.NewSearcher(previousReader);
        indexSearcher.Similarity = s.Similarity; // match the outer searcher's scoring
        Weight w = indexSearcher.CreateNormalizedWeight(q);
        AtomicReaderContext ctx = (AtomicReaderContext)indexSearcher.TopReaderContext;
        Scorer scorer = w.GetScorer(ctx, ((AtomicReader)ctx.Reader).LiveDocs);
        if (scorer != null) // null when the query matches nothing in that segment
        {
            bool more = scorer.Advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
            Assert.IsFalse(more, "query's last doc was " + lastDoc[0] + " but skipTo(" + (lastDoc[0] + 1) + ") got to " + scorer.DocID);
        }
        leafPtr++; // advance to the next leaf slot
    }
    lastReader[0] = (AtomicReader)context.Reader;
    if (Debugging.AssertsEnabled)
    {
        // leafPtr must track the collector's position in the precomputed leaf list
        Debugging.Assert(readerContextArray[leafPtr].Reader == context.Reader);
    }
    this.scorer = null; // force Collect() to fail until SetScorer is called for this reader
    lastDoc[0] = -1;
}
/// <summary>
/// Sets up a RAMDirectory, and adds documents (using English.IntToEnglish()) with two fields: field and multiField
/// and analyzes them using the PayloadAnalyzer.
/// </summary>
/// <param name="random"> Source of randomness for the MockDirectoryWrapper. </param>
/// <param name="similarity"> The Similarity class to use in the Searcher. </param>
/// <param name="numDocs"> The number of docs to add. </param>
/// <returns> An IndexSearcher over the newly built index. </returns>
// TODO: randomize
public virtual IndexSearcher SetUp(Random random, Similarity similarity, int numDocs)
{
    Directory directory = new MockDirectoryWrapper(random, new RAMDirectory());
    PayloadAnalyzer analyzer = new PayloadAnalyzer(this);

    // TODO randomize this
    IndexWriter writer = new IndexWriter(directory, (new IndexWriterConfig(LuceneTestCase.TEST_VERSION_CURRENT, analyzer)).SetSimilarity(similarity));
    // writer.infoStream = System.out;
    for (int i = 0; i < numDocs; i++)
    {
        Document doc = new Document();
        doc.Add(new TextField(FIELD, English.Int32ToEnglish(i), Field.Store.YES));
        doc.Add(new TextField(MULTI_FIELD, English.Int32ToEnglish(i) + " " + English.Int32ToEnglish(i), Field.Store.YES));
        doc.Add(new TextField(NO_PAYLOAD_FIELD, English.Int32ToEnglish(i), Field.Store.YES));
        writer.AddDocument(doc);
    }
    // Open a near-real-time reader before disposing the writer.
    Reader = DirectoryReader.Open(writer, true);
    writer.Dispose();
    IndexSearcher searcher = LuceneTestCase.NewSearcher(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
        null, // LUCENENET: Passing null allows us to bypass similarity, which we are setting here, anyway
#endif
        Reader);
    searcher.Similarity = similarity;
    return (searcher);
}
/// <summary>
/// Tests that a query matches an expected set of documents using a
/// HitCollector.
/// <para>
/// Note that when using the HitCollector API, documents will be collected
/// if they "match" regardless of what their score is.
/// </para>
/// </summary>
/// <param name="luceneTestCase"> The current test instance. </param>
/// <param name="random"> Source of randomness for QueryUtils checks. </param>
/// <param name="query"> The query to test. </param>
/// <param name="searcher"> The searcher to test the query against. </param>
/// <param name="defaultFieldName"> Used for displaying the query in assertion messages. </param>
/// <param name="results"> A list of documentIds that must match the query. </param>
/// <seealso cref="DoCheckHits(LuceneTestCase, Random, Query, string, IndexSearcher, int[])"/>
// LUCENENET specific
// Removes dependency on <see cref="LuceneTestCase.ClassEnv.Similarity"/>
public static void CheckHitCollector(LuceneTestCase luceneTestCase, Random random, Query query, string defaultFieldName, IndexSearcher searcher, int[] results)
#endif
{
    // General query sanity checks first (skipTo behavior, wrapped readers, ...).
    QueryUtils.Check(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
        luceneTestCase,
#endif
        random, query, searcher);

    Trace.TraceInformation("Checked");

    // Expected doc ids, sorted for set comparison.
    SortedSet<int?> correct = new SortedSet<int?>();
    for (int i = 0; i < results.Length; i++)
    {
        correct.Add(Convert.ToInt32(results[i], CultureInfo.InvariantCulture));
    }

    SortedSet<int?> actual = new SortedSet<int?>();
    ICollector c = new SetCollector(actual);

    searcher.Search(query, c);
    Assert.AreEqual(correct, actual, "Simple: " + query.ToString(defaultFieldName));

    // Re-run against wrapped readers (searcher first, middle, last sub-reader)
    // to verify results are position-independent.
    for (int i = -1; i < 2; i++)
    {
        actual.Clear();
        IndexSearcher s = QueryUtils.WrapUnderlyingReader(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
            luceneTestCase,
#endif
            random, searcher, i);
        s.Search(query, c);
        Assert.AreEqual(correct, actual, "Wrap Reader " + i + ": " + query.ToString(defaultFieldName));
    }
}
/// <summary>
/// Builds a RAMDirectory-backed index with <paramref name="numDocs"/> documents,
/// each carrying three text fields (FIELD, MULTI_FIELD, NO_PAYLOAD_FIELD) whose
/// content is the English spelling of the doc number, analyzed by the
/// PayloadAnalyzer.
/// </summary>
/// <param name="random"> Source of randomness for the MockDirectoryWrapper. </param>
/// <param name="similarity"> The Similarity to apply to both writer and searcher. </param>
/// <param name="numDocs"> How many documents to index. </param>
/// <returns> An IndexSearcher over the freshly written index. </returns>
// TODO: randomize
public virtual IndexSearcher SetUp(Random random, Similarity similarity, int numDocs)
{
    Directory directory = new MockDirectoryWrapper(random, new RAMDirectory());
    PayloadAnalyzer analyzer = new PayloadAnalyzer(this);

    // TODO randomize this
    IndexWriterConfig config = new IndexWriterConfig(LuceneTestCase.TEST_VERSION_CURRENT, analyzer);
    IndexWriter writer = new IndexWriter(directory, config.SetSimilarity(similarity));
    for (int docId = 0; docId < numDocs; docId++)
    {
        string english = English.IntToEnglish(docId);
        Document doc = new Document();
        doc.Add(new TextField(FIELD, english, Field.Store.YES));
        doc.Add(new TextField(MULTI_FIELD, english + " " + english, Field.Store.YES));
        doc.Add(new TextField(NO_PAYLOAD_FIELD, english, Field.Store.YES));
        writer.AddDocument(doc);
    }

    // Open a near-real-time reader before disposing the writer.
    Reader = DirectoryReader.Open(writer, true);
    writer.Dispose();

    IndexSearcher searcher = LuceneTestCase.NewSearcher(Reader, similarity);
    searcher.Similarity = similarity;
    return searcher;
}
/// <summary>
/// Tests that a query matches an expected set of documents using Hits.
///
/// <para>Note that when using the Hits API, documents will only be returned
/// if they have a positive normalized score.
/// </para>
/// </summary>
/// <param name="luceneTestCase"> The current test instance. </param>
/// <param name="random"> Source of randomness for QueryUtils checks. </param>
/// <param name="query"> The query to test. </param>
/// <param name="searcher"> The searcher to test the query against. </param>
/// <param name="defaultFieldName"> Used for displaying the query in assertion messages. </param>
/// <param name="results"> A list of documentIds that must match the query. </param>
/// <seealso cref="CheckHitCollector(LuceneTestCase, Random, Query, string, IndexSearcher, int[])"/>
// LUCENENET specific
// Removes dependency on <see cref="LuceneTestCase.ClassEnv.Similarity"/>
public static void DoCheckHits(LuceneTestCase luceneTestCase, Random random, Query query, string defaultFieldName, IndexSearcher searcher, int[] results)
#endif
{
    ScoreDoc[] hits = searcher.Search(query, 1000).ScoreDocs;

    // Expected doc ids, sorted for set comparison.
    SortedSet<int?> correct = new SortedSet<int?>();
    for (int i = 0; i < results.Length; i++)
    {
        correct.Add(Convert.ToInt32(results[i], CultureInfo.InvariantCulture));
    }

    SortedSet<int?> actual = new SortedSet<int?>();
    for (int i = 0; i < hits.Length; i++)
    {
        actual.Add(Convert.ToInt32(hits[i].Doc, CultureInfo.InvariantCulture));
    }

    Assert.AreEqual(correct, actual, query.ToString(defaultFieldName));

    // Follow up with general query sanity checks; wrapping is only done rarely
    // to keep runtime down.
    QueryUtils.Check(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
        luceneTestCase,
#endif
        random, query, searcher, LuceneTestCase.Rarely(random));
}
/// <summary>
/// Runs a battery of sanity checks on <paramref name="q1"/>: basic query
/// invariants, first-skipTo and skipTo consistency, (optionally) the same
/// checks against wrapped readers, explanation checks, and rewrite stability.
/// </summary>
/// <param name="luceneTestCase">The current test instance.</param>
/// <param name="random">Source of randomness for reader wrapping.</param>
/// <param name="q1">The query under test.</param>
/// <param name="s">The searcher to run against; may be null to check the query alone.</param>
/// <param name="wrap">Whether to repeat the checks against wrapped readers (edge -1/0/+1).</param>
// LUCENENET specific
// Removes dependency on <see cref="LuceneTestCase.ClassEnv.Similarity"/>
public static void Check(LuceneTestCase luceneTestCase, Random random, Query q1, IndexSearcher s, bool wrap)
{
    try
    {
        Check(q1);
        if (s == null)
        {
            return; // nothing searcher-dependent to verify
        }

        CheckFirstSkipTo(luceneTestCase, q1, s);
        CheckSkipTo(luceneTestCase, q1, s);

        if (wrap)
        {
            // Repeat with the real reader placed first, middle, and last among
            // empty readers; pass wrap=false to avoid infinite recursion.
            foreach (int edge in new int[] { -1, 0, +1 })
            {
                Check(luceneTestCase, random, q1, WrapUnderlyingReader(luceneTestCase, random, s, edge), false);
            }
        }

        CheckExplanations(q1, s);

        // Rewriting a clone must produce the same query as rewriting the original.
        Query q2 = (Query)q1.Clone();
        CheckEqual(s.Rewrite(q1), s.Rewrite(q2));
    }
    catch (IOException e)
    {
        throw new Exception(e.ToString(), e);
    }
}
/// <summary>
/// Given an <see cref="IndexSearcher"/>, returns a new <see cref="IndexSearcher"/> whose <see cref="IndexReader"/>
/// is a <see cref="MultiReader"/> containing the <see cref="IndexReader"/> of the original <see cref="IndexSearcher"/>,
/// as well as several "empty" <see cref="IndexReader"/>s -- some of which will have
/// deleted documents in them. This new <see cref="IndexSearcher"/> should
/// behave exactly the same as the original <see cref="IndexSearcher"/>. </summary>
/// <param name="luceneTestCase">The current test instance.</param>
/// <param name="random">Source of randomness for the new searcher.</param>
/// <param name="s"> The searcher to wrap. </param>
/// <param name="edge"> If negative, s will be the first sub; if 0, s will be in the middle, if positive s will be the last sub. </param>
// LUCENENET specific
// Removes dependency on <see cref="LuceneTestCase.ClassEnv.Similarity"/>
public static IndexSearcher WrapUnderlyingReader(LuceneTestCase luceneTestCase, Random random, IndexSearcher s, int edge)
#endif
{
    IndexReader r = s.IndexReader;

    // we can't put deleted docs before the nested reader, because
    // it will throw off the docIds
    IndexReader[] readers = new IndexReader[]
    {
        edge < 0 ? r : emptyReaders[0],
        emptyReaders[0],
        new FCInvisibleMultiReader(edge < 0 ? emptyReaders[4] : emptyReaders[0], emptyReaders[0], 0 == edge ? r : emptyReaders[0]),
        0 < edge ? emptyReaders[0] : emptyReaders[7],
        emptyReaders[0],
        new FCInvisibleMultiReader(0 < edge ? emptyReaders[0] : emptyReaders[5], emptyReaders[0], 0 < edge ? r : emptyReaders[0])
    };

    IndexSearcher @out = LuceneTestCase.NewSearcher(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
        luceneTestCase,
#endif
        new FCInvisibleMultiReader(readers));
    @out.Similarity = s.Similarity; // preserve the original searcher's scoring
    return (@out);
}
/// <summary>
/// Populates <paramref name="dir"/> with 100 synthetic documents. When
/// <paramref name="multiSegment"/> is true, commits every 10 docs so the index
/// ends up with several segments; otherwise force-merges down to one. Verifies
/// the resulting segment count before returning.
/// </summary>
/// <param name="random">Source of randomness for the writer config.</param>
/// <param name="dir">The directory to write the index into.</param>
/// <param name="multiSegment">Whether to produce a multi-segment index.</param>
public static void CreateIndex(Random random, Directory dir, bool multiSegment)
{
    IndexWriter.Unlock(dir); // clear any stale write lock

    IndexWriter writer = new IndexWriter(
        dir,
        LuceneTestCase.NewIndexWriterConfig(random, TEST_VERSION_CURRENT, new MockAnalyzer(random)).SetMergePolicy(new LogDocMergePolicy()));

    for (int docId = 0; docId < 100; docId++)
    {
        writer.AddDocument(CreateDocument(docId, 4));
        if (multiSegment && docId % 10 == 0)
        {
            writer.Commit(); // periodic commits create segment boundaries
        }
    }

    if (!multiSegment)
    {
        writer.ForceMerge(1);
    }
    writer.Dispose();

    // Sanity-check the segment structure we just asked for.
    DirectoryReader reader = DirectoryReader.Open(dir);
    if (multiSegment)
    {
        Assert.IsTrue(reader.Leaves.Count > 1);
    }
    else
    {
        Assert.IsTrue(reader.Leaves.Count == 1);
    }
    reader.Dispose();
}
/// <summary>
/// Indexes every document in <paramref name="docs"/> into <paramref name="dir"/>
/// on the calling thread. Each document's fields are copied into a new document
/// in a deterministic (name-sorted) order so repeated runs produce identical
/// indexes regardless of the source field ordering.
/// </summary>
/// <param name="random">Source of randomness for the writer config.</param>
/// <param name="docs">The documents to index, keyed by id.</param>
/// <param name="dir">The target directory.</param>
public static void IndexSerial(Random random, IDictionary<string, Document> docs, Directory dir)
{
    IndexWriter writer = new IndexWriter(
        dir,
        LuceneTestCase.NewIndexWriterConfig(random, TEST_VERSION_CURRENT, new MockAnalyzer(random)).SetMergePolicy(NewLogMergePolicy()));

    // index all docs in a single thread
    foreach (Document source in docs.Values)
    {
        // put fields in same order each time
        List<IndexableField> fields = new List<IndexableField>(source.Fields);
        fields.Sort(fieldNameComparator);

        Document ordered = new Document();
        foreach (IndexableField field in fields)
        {
            ordered.Add(field);
        }
        writer.AddDocument(ordered);
        // System.out.println("indexing "+ordered);
    }
    writer.Dispose();
}
/// <summary>
/// Check that the FIRST skip on freshly created scorers always goes to the right doc:
/// runs the query with a collector that, per hit, rebuilds a scorer and advances it
/// directly to the hit, then verifies that advancing past the last hit of the final
/// reader returns NO_MORE_DOCS.
/// </summary>
/// <param name="q"> The query under test. </param>
/// <param name="s"> The searcher to run against. </param>
/// <param name="similarity">
/// LUCENENET specific
/// Removes dependency on <see cref="LuceneTestCase.ClassEnv.Similarity"/>
/// </param>
public static void CheckFirstSkipTo(Query q, IndexSearcher s, Similarity similarity)
{
    //System.out.println("checkFirstSkipTo: "+q);
    const float maxDiff = 1e-3f; // score tolerance passed to the collector
    // Single-element arrays act as mutable cells shared with the anonymous collector.
    int[] lastDoc = new int[] { -1 };
    AtomicReader[] lastReader = new AtomicReader[] { null };
    IList<AtomicReaderContext> context = s.TopReaderContext.Leaves;
    s.Search(q, new CollectorAnonymousInnerClassHelper2(q, s, maxDiff, lastDoc, lastReader, context, similarity));

    if (lastReader[0] != null)
    {
        // confirm that skipping beyond the last doc, on the
        // previous reader, hits NO_MORE_DOCS
        AtomicReader previousReader = lastReader[0];
        IndexSearcher indexSearcher = LuceneTestCase.NewSearcher(previousReader, similarity);
        indexSearcher.Similarity = s.Similarity;
        Weight w = indexSearcher.CreateNormalizedWeight(q);
        Scorer scorer = w.Scorer((AtomicReaderContext)indexSearcher.TopReaderContext, previousReader.LiveDocs);
        if (scorer != null) // null when the query matches nothing in that segment
        {
            bool more = scorer.Advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
            Assert.IsFalse(more, "query's last doc was " + lastDoc[0] + " but skipTo(" + (lastDoc[0] + 1) + ") got to " + scorer.DocID());
        }
    }
}
/// <summary>
/// Creates a runnable that will index and then close a writer on a delay,
/// signaling <paramref name="iwConstructed"/> once the writer exists.
/// </summary>
/// <param name="outerInstance">
/// LUCENENET specific
/// Passed in because this class accesses non-static methods,
/// NewTextField and NewIndexWriterConfig.
/// Only stored when FEATURE_INSTANCE_TESTDATA_INITIALIZATION is defined.
/// </param>
/// <param name="dir"> The directory the runnable will write to. </param>
/// <param name="iwConstructed"> Countdown signaled after writer construction. </param>
public DelayedIndexAndCloseRunnable(LuceneTestCase outerInstance, Directory dir, CountdownEvent iwConstructed)
{
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
    this.outerInstance = outerInstance;
#endif
    this.dir = dir;
    this.iwConstructed = iwConstructed;
}
/// <summary>
/// Returns a searcher over the captured <c>other</c> reader. Note that the
/// <paramref name="ignored"/> argument is intentionally unused (as its name
/// indicates): this override always wraps the reader held by the enclosing type.
/// </summary>
public override IndexSearcher NewSearcher(IndexReader ignored)
{
    return (LuceneTestCase.NewSearcher(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
        outerInstance,
#endif
        other));
}
/// <summary>
/// Skips nested suite classes when they are started stand-alone (rather than
/// from their enclosing suite), then delegates to the wrapped statement.
/// </summary>
public override void Evaluate()
{
    if (d.TestClass.IsSubclassOf(typeof(NestedTestSuite)))
    {
        // Nested suites only run meaningfully from inside their parent suite.
        LuceneTestCase.AssumeTrue("Nested suite class ignored (started as stand-alone).", RunningNested);
    }
    // NOTE(review): lowercase 'evaluate()' looks like an unconverted Java
    // (JUnit Statement) call — confirm the member name on 's'.
    s.evaluate();
}
/// <summary>
/// Optionally wraps <paramref name="stream"/> in a payload-producing filter.
/// The decision (none / variable-length / fixed-length payloads) is made at
/// random the first time a field name is seen and then cached in
/// <c>previousMappings</c> so the same field is always treated consistently.
/// Sentinels: -1 means no payloads; int.MaxValue means variable length;
/// any other value is the fixed payload length.
/// </summary>
/// <param name="stream"> The token stream to (maybe) wrap. </param>
/// <param name="fieldName"> Field whose payload policy is looked up or chosen. </param>
/// <returns> The original stream, or a payload-filter wrapper around it. </returns>
private TokenFilter MaybePayload(TokenFilter stream, string fieldName)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        previousMappings.TryGetValue(fieldName, out int? val);
        if (val == null)
        {
            // First time we see this field: pick a policy (usually "none").
            val = -1; // no payloads
            if (LuceneTestCase.Rarely(random))
            {
                int choice = random.Next(3);
                if (choice == 1)
                {
                    val = int.MaxValue; // variable length payload
                }
                else if (choice == 2)
                {
                    val = random.Next(12); // fixed length payload
                }
                // choice == 0 keeps val at -1 (no payloads)
            }

            if (LuceneTestCase.Verbose)
            {
                if (val == int.MaxValue)
                {
                    Console.WriteLine("MockAnalyzer: field=" + fieldName + " gets variable length payloads");
                }
                else if (val != -1)
                {
                    Console.WriteLine("MockAnalyzer: field=" + fieldName + " gets fixed length=" + val + " payloads");
                }
            }

            previousMappings[fieldName] = val; // save it so we are consistent for this field
        }

        if (val == -1)
        {
            return stream;
        }
        if (val == int.MaxValue)
        {
            return new MockVariableLengthPayloadFilter(random, stream);
        }
        return new MockFixedLengthPayloadFilter(random, stream, (int)val);
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
/// <summary>
/// Alternate scorer skipTo(),skipTo(),next(),next(),skipTo(),skipTo(), etc
/// and ensure a hitcollector receives same docs and scores.
/// Runs the query once per interleaving "order" of skip/next operations; after
/// each run, verifies that advancing past the last doc of the final reader
/// returns NO_MORE_DOCS.
/// </summary>
/// <param name="luceneTestCase">The current test instance.</param>
/// <param name="q">The query under test.</param>
/// <param name="s">The searcher to run against.</param>
// LUCENENET specific
// Removes dependency on <see cref="LuceneTestCase.ClassEnv.Similarity"/>
public static void CheckSkipTo(LuceneTestCase luceneTestCase, Query q, IndexSearcher s)
#endif
{
    //System.out.println("Checking "+q);
    IList<AtomicReaderContext> readerContextArray = s.TopReaderContext.Leaves;
    if (s.CreateNormalizedWeight(q).ScoresDocsOutOfOrder) // in this case order of skipTo() might differ from that of next().
    {
        return;
    }

    const int skip_op = 0;
    const int next_op = 1;
    // Each inner array is one interleaving pattern of skip/next calls that the
    // collector will cycle through while consuming hits.
    int[][] orders = new int[][] { new int[] { next_op }, new int[] { skip_op }, new int[] { skip_op, next_op }, new int[] { next_op, skip_op }, new int[] { skip_op, skip_op, next_op, next_op }, new int[] { next_op, next_op, skip_op, skip_op }, new int[] { skip_op, skip_op, skip_op, next_op, next_op } };
    for (int k = 0; k < orders.Length; k++)
    {
        int[] order = orders[k];
        // System.out.print("Order:");for (int i = 0; i < order.Length; i++)
        // System.out.print(order[i]==skip_op ? " skip()":" next()");
        // System.out.println();
        // Single-element arrays act as mutable cells shared with the collector.
        int[] opidx = new int[] { 0 };
        int[] lastDoc = new int[] { -1 };

        // FUTURE: ensure scorer.Doc()==-1

        const float maxDiff = 1e-5f; // score tolerance between skip and next paths
        AtomicReader[] lastReader = new AtomicReader[] { null };

        s.Search(q, new CollectorAnonymousInnerClassHelper(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
            luceneTestCase,
#endif
            q, s, readerContextArray, skip_op, order, opidx, lastDoc, maxDiff, lastReader));

        if (lastReader[0] != null)
        {
            // confirm that skipping beyond the last doc, on the
            // previous reader, hits NO_MORE_DOCS
            AtomicReader previousReader = lastReader[0];
            IndexSearcher indexSearcher = LuceneTestCase.NewSearcher(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
                luceneTestCase,
#endif
                previousReader, false);
            indexSearcher.Similarity = s.Similarity;
            Weight w = indexSearcher.CreateNormalizedWeight(q);
            AtomicReaderContext ctx = (AtomicReaderContext)previousReader.Context;
            Scorer scorer = w.GetScorer(ctx, ((AtomicReader)ctx.Reader).LiveDocs);
            if (scorer != null) // null when the query matches nothing in that segment
            {
                bool more = scorer.Advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
                Assert.IsFalse(more, "query's last doc was " + lastDoc[0] + " but skipTo(" + (lastDoc[0] + 1) + ") got to " + scorer.DocID);
            }
        }
    }
}
/// <summary>
/// Builds a codec that randomly assigns postings and doc-values formats per
/// field (seeded by <c>perFieldSeed</c>), drawing from a shuffled pool of
/// formats and trimming each pool to at most 4 entries to limit open files.
/// </summary>
/// <param name="random"> Source of randomness for format parameters and shuffling. </param>
/// <param name="avoidCodecs"> Names of codecs/formats that must not be used. </param>
public RandomCodec(Random random, ISet<string> avoidCodecs)
{
    this.perFieldSeed = random.Next();
    // TODO: make it possible to specify min/max iterms per
    // block via CL:
    int minItemsPerBlock = TestUtil.NextInt32(random, 2, 100);
    int maxItemsPerBlock = 2 * (Math.Max(2, minItemsPerBlock - 1)) + random.Next(100);
    int lowFreqCutoff = TestUtil.NextInt32(random, 2, 100);

    Add(avoidCodecs,
        new Lucene41PostingsFormat(minItemsPerBlock, maxItemsPerBlock),
        new FSTPostingsFormat(),
        new FSTOrdPostingsFormat(),
        new FSTPulsing41PostingsFormat(1 + random.Next(20)),
        new FSTOrdPulsing41PostingsFormat(1 + random.Next(20)),
        new DirectPostingsFormat(LuceneTestCase.Rarely(random) ? 1 : (LuceneTestCase.Rarely(random) ? int.MaxValue : maxItemsPerBlock),
                                 LuceneTestCase.Rarely(random) ? 1 : (LuceneTestCase.Rarely(random) ? int.MaxValue : lowFreqCutoff)),
        new Pulsing41PostingsFormat(1 + random.Next(20), minItemsPerBlock, maxItemsPerBlock),
        // add pulsing again with (usually) different parameters
        new Pulsing41PostingsFormat(1 + random.Next(20), minItemsPerBlock, maxItemsPerBlock),
        //TODO as a PostingsFormat which wraps others, we should allow TestBloomFilteredLucene41Postings to be constructed
        //with a choice of concrete PostingsFormats. Maybe useful to have a generic means of marking and dealing
        //with such "wrapper" classes?
        new TestBloomFilteredLucene41Postings(),
        new MockSepPostingsFormat(),
        new MockFixedInt32BlockPostingsFormat(TestUtil.NextInt32(random, 1, 2000)),
        new MockVariableInt32BlockPostingsFormat(TestUtil.NextInt32(random, 1, 127)),
        new MockRandomPostingsFormat(random),
        new NestedPulsingPostingsFormat(),
        new Lucene41WithOrds(),
        new SimpleTextPostingsFormat(),
        new AssertingPostingsFormat(),
        // NOTE(review): lowercase 'nextFloat()' looks like an unconverted Java
        // call — confirm this extension/member exists on System.Random here.
        new MemoryPostingsFormat(true, random.nextFloat()),
        new MemoryPostingsFormat(false, random.nextFloat())
    );

    AddDocValues(avoidCodecs,
        new Lucene45DocValuesFormat(),
        new DiskDocValuesFormat(),
        new MemoryDocValuesFormat(),
        new SimpleTextDocValuesFormat(),
        new AssertingDocValuesFormat());

    // Randomize which formats end up assigned to fields.
    formats.Shuffle(random);
    dvFormats.Shuffle(random);

    // Avoid too many open files:
    if (formats.Count > 4)
    {
        formats = formats.SubList(0, 4);
    }
    if (dvFormats.Count > 4)
    {
        dvFormats = dvFormats.SubList(0, 4);
    }
}
/// <summary>
/// Given an IndexSearcher, returns a new IndexSearcher whose IndexReader
/// is a MultiReader containing the Reader of the original IndexSearcher,
/// as well as several "empty" IndexReaders -- some of which will have
/// deleted documents in them. This new IndexSearcher should
/// behave exactly the same as the original IndexSearcher.
/// </summary>
/// <param name="random"> Source of randomness for the new searcher. </param>
/// <param name="s"> The searcher to wrap. </param>
/// <param name="edge"> If negative, s will be the first sub; if 0, s will be in the middle, if positive s will be the last sub. </param>
/// <param name="similarity">
/// LUCENENET specific
/// Removes dependency on <see cref="LuceneTestCase.ClassEnv.Similarity"/>
/// </param>
public static IndexSearcher WrapUnderlyingReader(Random random, IndexSearcher s, int edge, Similarity similarity)
{
    IndexReader original = s.IndexReader;

    // Deleted docs must never precede the nested reader, because that would
    // throw off the docIds seen by the query.
    IndexReader[] subReaders =
    {
        edge < 0 ? original : EmptyReaders[0],
        EmptyReaders[0],
        new FCInvisibleMultiReader(edge < 0 ? EmptyReaders[4] : EmptyReaders[0], EmptyReaders[0], 0 == edge ? original : EmptyReaders[0]),
        0 < edge ? EmptyReaders[0] : EmptyReaders[7],
        EmptyReaders[0],
        new FCInvisibleMultiReader(0 < edge ? EmptyReaders[0] : EmptyReaders[5], EmptyReaders[0], 0 < edge ? original : EmptyReaders[0])
    };

    IndexSearcher wrapped = LuceneTestCase.NewSearcher(new FCInvisibleMultiReader(subReaders), similarity);
    wrapped.Similarity = s.Similarity; // preserve the original searcher's scoring
    return wrapped;
}
/// <summary>
/// One-time fixture setup: builds a legacy-format (PreFlexRW) index sized from
/// the term index interval and divisor, then opens its term-dictionary and
/// term-index files directly to construct the TermInfosReaderIndex under test,
/// and finally samples terms from a reader over the same index.
/// </summary>
public override void BeforeClass()
{
    base.BeforeClass();

    // NOTE: turn off compound file, this test will open some index files directly.
    OldFormatImpersonationIsActive = true;
    IndexWriterConfig config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random, MockTokenizer.KEYWORD, false)).SetUseCompoundFile(false);

    TermIndexInterval = config.TermIndexInterval;
    IndexDivisor = TestUtil.NextInt32(Random, 1, 10);
    NUMBER_OF_DOCUMENTS = AtLeast(100);
    // Scale the field count so the index exercises multiple index-interval strides.
    NUMBER_OF_FIELDS = AtLeast(Math.Max(10, 3 * TermIndexInterval * IndexDivisor / NUMBER_OF_DOCUMENTS));

    Directory = NewDirectory();

    config.SetCodec(new PreFlexRWCodec());
    LogMergePolicy mp = NewLogMergePolicy();
    // NOTE: turn off compound file, this test will open some index files directly.
    mp.NoCFSRatio = 0.0;
    config.SetMergePolicy(mp);

    Populate(Directory, config);

    // The test needs a single segment; grab its name for direct file access.
    DirectoryReader r0 = IndexReader.Open(Directory);
    SegmentReader r = LuceneTestCase.GetOnlySegmentReader(r0);
    string segment = r.SegmentName;
    r.Dispose();

    FieldInfosReader infosReader = (new PreFlexRWCodec()).FieldInfosFormat.FieldInfosReader;
    FieldInfos fieldInfos = infosReader.Read(Directory, segment, "", IOContext.READ_ONCE);
    string segmentFileName = IndexFileNames.SegmentFileName(segment, "", Lucene3xPostingsFormat.TERMS_INDEX_EXTENSION);
    long tiiFileLength = Directory.FileLength(segmentFileName);
    IndexInput input = Directory.OpenInput(segmentFileName, NewIOContext(Random));
    // Term dictionary enumerator (".tis"); kept open for the tests.
    TermEnum = new SegmentTermEnum(Directory.OpenInput(IndexFileNames.SegmentFileName(segment, "", Lucene3xPostingsFormat.TERMS_EXTENSION), NewIOContext(Random)), fieldInfos, false);
    int totalIndexInterval = TermEnum.indexInterval * IndexDivisor;

    // Term index enumerator (".tii") is only needed to build the in-memory index.
    SegmentTermEnum indexEnum = new SegmentTermEnum(input, fieldInfos, true);
    Index = new TermInfosReaderIndex(indexEnum, IndexDivisor, tiiFileLength, totalIndexInterval);
    indexEnum.Dispose();
    input.Dispose();

    Reader = IndexReader.Open(Directory);
    SampleTerms = Sample(Random, Reader, 1000);
}
/// <summary>
/// Captures the state shared with CheckFirstSkipTo: the query and searcher under
/// test, the score tolerance, and the mutable single-element arrays used to
/// communicate the last-seen doc/reader back to the caller.
/// </summary>
public CollectorAnonymousInnerClassHelper2(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
    LuceneTestCase luceneTestCase,
#endif
    Query q, IndexSearcher s, float maxDiff, int[] lastDoc, AtomicReader[] lastReader, IList<AtomicReaderContext> context)
{
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
    this.luceneTestCase = luceneTestCase;
#endif
    this.q = q;
    this.s = s;
    this.maxDiff = maxDiff;
    this.lastDoc = lastDoc;
    this.lastReader = lastReader;
    this.context = context;
}
/// <summary>
/// Creates an in-memory index containing <paramref name="numDocs"/> empty
/// documents, all of which are then marked deleted via AllDeletedFilterReader —
/// yielding a reader with maxDoc &gt; 0 but zero live documents.
/// </summary>
/// <param name="numDocs"> Number of (empty) documents; must be positive. </param>
/// <exception cref="ArgumentOutOfRangeException"> If <paramref name="numDocs"/> is not positive. </exception>
private static IndexReader MakeEmptyIndex(Random random, int numDocs)
{
    // LUCENENET: explicit guard instead of Debug.Assert, which is silently
    // compiled out of release builds and would let a zero/negative count
    // produce a confusing failure further down.
    if (numDocs <= 0)
    {
        throw new ArgumentOutOfRangeException(nameof(numDocs), numDocs, "numDocs must be positive");
    }

    Directory d = new MockDirectoryWrapper(random, new RAMDirectory());
    IndexWriter w = new IndexWriter(d, new IndexWriterConfig(LuceneTestCase.TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    for (int i = 0; i < numDocs; i++)
    {
        w.AddDocument(new Document());
    }
    w.ForceMerge(1); // single segment, required by GetOnlySegmentReader below
    w.Commit();
    w.Dispose();

    DirectoryReader reader = DirectoryReader.Open(d);
    return new AllDeletedFilterReader(LuceneTestCase.GetOnlySegmentReader(reader));
}
/// <summary>
/// Background checker thread: until stopped, picks a random refreshed/new
/// reader pair and asserts the two indexes are equal, then sleeps a short
/// random interval. Uses lock(this)/Monitor.Wait to mirror the original
/// Java synchronized/wait(timeout) idiom.
/// </summary>
public override void Run()
{
    Random rnd = LuceneTestCase.Random();
    while (!Stopped)
    {
        int numReaders = Readers.Count;
        if (numReaders > 0)
        {
            // Compare a randomly chosen couple of readers over the same index.
            ReaderCouple c = Readers[rnd.Next(numReaders)];
            TestDirectoryReader.AssertIndexEquals(c.NewReader, c.RefreshedReader);
        }

        lock (this)
        {
            // Wait 1-100ms; a Pulse on this instance can wake us early.
            Monitor.Wait(this, TimeSpan.FromMilliseconds(TestUtil.NextInt(Random(), 1, 100)));
        }
    }
}
/// <summary>
/// When execution reaches the configured sore point, injects the configured
/// failure mode: a test assumption violation, a thrown error, or an assertion
/// failure. Does nothing when <paramref name="pt"/> is not the target point.
/// </summary>
internal static void TriggerOn(SorePoint pt)
{
    if (pt == @where)
    {
        switch (Type)
        {
            case Lucene.Net.Util.junitcompat.SoreType.ASSUMPTION:
                // NOTE(review): lowercase 'assumeTrue' looks like an unconverted
                // Java call — confirm against LuceneTestCase's actual API.
                LuceneTestCase.assumeTrue(pt.ToString(), false);
                throw new Exception("unreachable"); // assume must have thrown

            case Lucene.Net.Util.junitcompat.SoreType.ERROR:
                throw new Exception(pt.ToString());

            case Lucene.Net.Util.junitcompat.SoreType.FAILURE:
                // NOTE(review): (message, condition) argument order is JUnit
                // style — confirm this Assert overload exists here.
                Assert.IsTrue(pt.ToString(), false);
                throw new Exception("unreachable"); // assert must have thrown
        }
    }
}
/// <summary>
/// Background thread body: even-indexed threads refresh the reader once
/// (synchronized) and record the resulting couple, then exit; odd-indexed
/// threads repeatedly open-if-changed, run a random term query, and dispose
/// the refreshed reader, sleeping a short random interval between rounds.
/// </summary>
public override void Run()
{
    Random rnd = LuceneTestCase.Random();
    while (!Stopped)
    {
        if (Index % 2 == 0)
        {
            // refresh reader synchronized
            ReaderCouple c = (OuterInstance.RefreshReader(r, Test, Index, true));
            ReadersToClose.Add(c.NewReader);
            ReadersToClose.Add(c.RefreshedReader);
            Readers.Add(c);
            // prevent too many readers
            break;
        }
        else
        {
            // not synchronized
            DirectoryReader refreshed = DirectoryReader.OpenIfChanged(r);
            if (refreshed == null)
            {
                refreshed = r; // unchanged: keep searching the original
            }
            IndexSearcher searcher = OuterInstance.NewSearcher(refreshed);
            ScoreDoc[] hits = searcher.Search(new TermQuery(new Term("field1", "a" + rnd.Next(refreshed.MaxDoc))), null, 1000).ScoreDocs;
            if (hits.Length > 0)
            {
                searcher.Doc(hits[0].Doc); // exercise stored-field retrieval
            }
            if (refreshed != r)
            {
                refreshed.Dispose(); // only dispose readers we opened ourselves
            }
        }
        lock (this)
        {
            // Wait 1-100ms; a Pulse on this instance can wake us early.
            Monitor.Wait(this, TimeSpan.FromMilliseconds(TestUtil.NextInt(Random(), 1, 100)));
        }
    }
}
/// <summary>
/// Captures the state shared with CheckSkipTo: the query and searcher under
/// test, the leaf contexts, the skip/next interleaving order (with its mutable
/// position cursor), the score tolerance, and the mutable single-element arrays
/// used to report the last-seen doc/reader back to the caller.
/// </summary>
public CollectorAnonymousInnerClassHelper(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
    LuceneTestCase luceneTestCase,
#endif
    Query q, IndexSearcher s, IList<AtomicReaderContext> readerContextArray, int skip_op, int[] order, int[] opidx, int[] lastDoc, float maxDiff, AtomicReader[] lastReader)
{
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
    this.luceneTestCase = luceneTestCase;
#endif
    this.q = q;
    this.s = s;
    this.readerContextArray = readerContextArray;
    this.skip_op = skip_op;
    this.order = order;
    this.opidx = opidx;
    this.lastDoc = lastDoc;
    this.maxDiff = maxDiff;
    this.lastReader = lastReader;
}
// LUCENENET specific - de-nested SetCollector

/// <summary>
/// Tests that a query matches an expected set of documents using Hits.
///
/// <para>Note that when using the Hits API, documents will only be returned
/// if they have a positive normalized score.
/// </para>
/// </summary>
/// <param name="random"> Source of randomness for the follow-up QueryUtils checks. </param>
/// <param name="query"> The query to test. </param>
/// <param name="searcher"> The searcher to test the query against. </param>
/// <param name="defaultFieldName"> Used for displaying the query in assertion messages. </param>
/// <param name="results"> A list of documentIds that must match the query. </param>
/// <seealso cref="CheckHitCollector(Random, Query, string, IndexSearcher, int[])"/>
public static void DoCheckHits(Random random, Query query, string defaultFieldName, IndexSearcher searcher, int[] results)
{
    ScoreDoc[] hits = searcher.Search(query, 1000).ScoreDocs;

    // Expected doc ids, sorted for set comparison.
    JCG.SortedSet<int> expected = new JCG.SortedSet<int>();
    foreach (int docId in results)
    {
        expected.Add(Convert.ToInt32(docId, CultureInfo.InvariantCulture));
    }

    // Actual doc ids returned by the search.
    JCG.SortedSet<int> actual = new JCG.SortedSet<int>();
    foreach (ScoreDoc hit in hits)
    {
        actual.Add(Convert.ToInt32(hit.Doc, CultureInfo.InvariantCulture));
    }

    Assert.AreEqual(expected, actual, aggressive: false, () => query.ToString(defaultFieldName));

    // Follow up with general query sanity checks; wrapping only rarely, to keep runtime down.
    QueryUtils.Check(random, query, searcher, LuceneTestCase.Rarely(random));
}
/// <summary>
/// Tests that a query matches an expected set of documents using Hits.
///
/// <para>
/// Note that when using the Hits API, documents will only be returned
/// if they have a positive normalized score.
/// </para>
/// </summary>
/// <param name="random"> Source of randomness for the follow-up QueryUtils checks. </param>
/// <param name="query"> The query to test. </param>
/// <param name="searcher"> The searcher to test the query against. </param>
/// <param name="defaultFieldName"> Used for displaying the query in assertion messages. </param>
/// <param name="results"> A list of documentIds that must match the query. </param>
/// <seealso cref="CheckHitCollector"/>
public static void DoCheckHits(Random random, Query query, string defaultFieldName, IndexSearcher searcher, int[] results)
{
    ScoreDoc[] hits = searcher.Search(query, 1000).ScoreDocs;

    // Expected doc ids, sorted for set comparison.
    // LUCENENET: pass InvariantCulture explicitly (CA1305), matching the
    // sibling overloads of this method elsewhere in the test framework.
    SortedSet<int?> correct = new SortedSet<int?>();
    for (int i = 0; i < results.Length; i++)
    {
        correct.Add(Convert.ToInt32(results[i], CultureInfo.InvariantCulture));
    }

    SortedSet<int?> actual = new SortedSet<int?>();
    for (int i = 0; i < hits.Length; i++)
    {
        actual.Add(Convert.ToInt32(hits[i].Doc, CultureInfo.InvariantCulture));
    }

    Assert.AreEqual(correct, actual, query.ToString(defaultFieldName));

    // Follow up with general query sanity checks; wrapping only rarely, to keep runtime down.
    QueryUtils.Check(random, query, searcher, LuceneTestCase.Rarely(random));
}
/// <summary>
/// Various query sanity checks on a searcher, some checks are only done for
/// instance of <see cref="IndexSearcher"/>. Convenience overload that enables
/// reader wrapping (delegates with <c>wrap: true</c>).
/// </summary>
/// <param name="luceneTestCase"> The current test instance. </param>
/// <param name="random">A random instance (usually <see cref="LuceneTestCase.Random"/>).</param>
/// <param name="q1">A <see cref="Query"/>.</param>
/// <param name="s">An <see cref="IndexSearcher"/>.</param>
/// <seealso cref="Check(Query)"/>
/// <seealso cref="CheckFirstSkipTo(LuceneTestCase, Query, IndexSearcher)"/>
/// <seealso cref="CheckSkipTo(LuceneTestCase, Query, IndexSearcher)"/>
/// <seealso cref="CheckExplanations(Query, IndexSearcher)"/>
/// <seealso cref="CheckEqual(Query, Query)"/>
// LUCENENET specific
// Removes static dependency on <see cref="LuceneTestCase.ClassEnvRule.Similarity"/>
public static void Check(LuceneTestCase luceneTestCase, Random random, Query q1, IndexSearcher s)
    => Check(luceneTestCase, random, q1, s, wrap: true);
/// <summary>
/// Creates a runnable that will index into <paramref name="dir"/> and then
/// close the writer, signaling <paramref name="iwConstructed"/> once the
/// writer has been constructed.
/// </summary>
/// <param name="outerInstance">
/// LUCENENET specific
/// Passed in because this class accesses non-static methods,
/// NewTextField and NewIndexWriterConfig.
/// </param>
/// <param name="dir"> The directory the runnable writes to. </param>
/// <param name="iwConstructed"> Countdown signaled after writer construction. </param>
public DelayedIndexAndCloseRunnable(Directory dir, CountdownEvent iwConstructed, LuceneTestCase outerInstance)
{
    OuterInstance = outerInstance;
    Dir = dir;
    IwConstructed = iwConstructed;
}
/// <summary>
/// Creates a timed thread that repeatedly searches the given directory.
/// </summary>
/// <param name="directory"> The directory to search. </param>
/// <param name="threads"> The sibling timed threads this one coordinates with (passed to the base class). </param>
/// <param name="outerInstance">
/// LUCENENET specific
/// Passed in because <see cref="LuceneTestCase.NewSearcher(IndexReader)"/>
/// is no longer static.
/// </param>
public SearcherThread(Directory directory, TimedThread[] threads, LuceneTestCase outerInstance)
    : base(threads)
{
    this.directory = directory;
    this.outerInstance = outerInstance;
}