public virtual void TestTotalBytesSize()
{
    Directory d = NewDirectory();
    if (d is MockDirectoryWrapper)
    {
        ((MockDirectoryWrapper)d).Throttling = MockDirectoryWrapper.Throttling_e.NEVER;
    }
    IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    iwc.SetMaxBufferedDocs(5);
    iwc.SetMergeScheduler(new TrackingCMS());
    if (TestUtil.GetPostingsFormat("id").Equals("SimpleText"))
    {
        // no SimpleText: force a binary postings format instead
        iwc.SetCodec(TestUtil.AlwaysPostingsFormat(new Lucene41PostingsFormat()));
    }
    RandomIndexWriter w = new RandomIndexWriter(Random(), d, iwc);
    for (int i = 0; i < 1000; i++)
    {
        Document doc = new Document();
        doc.Add(new StringField("id", "" + i, Field.Store.NO));
        w.AddDocument(doc);

        if (Random().NextBoolean())
        {
            w.DeleteDocuments(new Term("id", "" + Random().Next(i + 1)));
        }
    }
    Assert.IsTrue(((TrackingCMS)w.w.Config.MergeScheduler).TotMergedBytes != 0);
    w.Dispose();
    d.Dispose();
}
public virtual void TestZeroTerms()
{
    var d = NewDirectory();
    RandomIndexWriter w = new RandomIndexWriter(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
        this,
#endif
        Random, d);
    Document doc = new Document();
    doc.Add(NewTextField("field", "one two three", Field.Store.NO));
    // Note: this first document is intentionally discarded; only the "field2"
    // document below is indexed, so "field" ends up with zero terms.
    doc = new Document();
    doc.Add(NewTextField("field2", "one two three", Field.Store.NO));
    w.AddDocument(doc);
    w.Commit();
    w.DeleteDocuments(new Term("field", "one"));
    w.ForceMerge(1);

    IndexReader r = w.GetReader();
    w.Dispose();
    Assert.AreEqual(1, r.NumDocs);
    Assert.AreEqual(1, r.MaxDoc);
    Terms terms = MultiFields.GetTerms(r, "field");
    if (terms != null)
    {
        Assert.IsNull(terms.GetIterator(null).Next());
    }
    r.Dispose();
    d.Dispose();
}
public virtual void Test()
{
    Directory dir = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    conf.SetCodec(new Lucene46Codec());
    RandomIndexWriter riw = new RandomIndexWriter(Random(), dir, conf);
    Document doc = new Document();
    // these fields should sometimes get term vectors, etc.
    Field idField = NewStringField("id", "", Field.Store.NO);
    Field bodyField = NewTextField("body", "", Field.Store.NO);
    Field dvField = new NumericDocValuesField("dv", 5);
    doc.Add(idField);
    doc.Add(bodyField);
    doc.Add(dvField);
    for (int i = 0; i < 100; i++)
    {
        idField.SetStringValue(Convert.ToString(i));
        bodyField.SetStringValue(TestUtil.RandomUnicodeString(Random()));
        riw.AddDocument(doc);
        if (Random().Next(7) == 0)
        {
            riw.Commit();
        }
        if (Random().Next(20) == 0)
        {
            riw.DeleteDocuments(new Term("id", Convert.ToString(i)));
        }
    }
    riw.Dispose();
    CheckHeaders(dir);
    dir.Dispose();
}
public virtual void TestZeroTerms()
{
    var d = NewDirectory();
    RandomIndexWriter w = new RandomIndexWriter(Random(), d, Similarity, TimeZone);
    Document doc = new Document();
    doc.Add(NewTextField("field", "one two three", Field.Store.NO));
    doc = new Document();
    doc.Add(NewTextField("field2", "one two three", Field.Store.NO));
    w.AddDocument(doc);
    w.Commit();
    w.DeleteDocuments(new Term("field", "one"));
    w.ForceMerge(1);

    IndexReader r = w.Reader;
    w.Dispose();
    Assert.AreEqual(1, r.NumDocs);
    Assert.AreEqual(1, r.MaxDoc);
    Terms terms = MultiFields.GetTerms(r, "field");
    if (terms != null)
    {
        Assert.IsNull(terms.Iterator(null).Next());
    }
    r.Dispose();
    d.Dispose();
}
// [Test] // LUCENENET NOTE: For now, we are overriding this test in every subclass to pull it into the right context for the subclass
public virtual void TestBulkMergeWithDeletes()
{
    int numDocs = AtLeast(200);
    Directory dir = NewDirectory();
    RandomIndexWriter w = new RandomIndexWriter(Random(), dir,
        NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
            .SetMergePolicy(NoMergePolicy.COMPOUND_FILES));
    for (int i = 0; i < numDocs; ++i)
    {
        Document doc = new Document();
        doc.Add(new StringField("id", Convert.ToString(i, CultureInfo.InvariantCulture), Field.Store.YES));
        doc.Add(new StoredField("f", TestUtil.RandomSimpleString(Random())));
        w.AddDocument(doc);
    }
    int deleteCount = TestUtil.NextInt(Random(), 5, numDocs);
    for (int i = 0; i < deleteCount; ++i)
    {
        int id = Random().Next(numDocs);
        w.DeleteDocuments(new Term("id", Convert.ToString(id, CultureInfo.InvariantCulture)));
    }
    w.Commit();
    w.Dispose();

    w = new RandomIndexWriter(Random(), dir, ClassEnvRule.similarity, ClassEnvRule.timeZone);
    w.ForceMerge(TestUtil.NextInt(Random(), 1, 3));
    w.Commit();
    w.Dispose();

    TestUtil.CheckIndex(dir);
    dir.Dispose();
}
public virtual void TestForceMergeDeletesMaxSegSize()
{
    Directory dir = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random));
    TieredMergePolicy tmp = new TieredMergePolicy();
    tmp.MaxMergedSegmentMB = 0.01;
    tmp.ForceMergeDeletesPctAllowed = 0.0;
    conf.SetMergePolicy(tmp);

    RandomIndexWriter w = new RandomIndexWriter(Random, dir, conf);
    w.DoRandomForceMerge = false;

    int numDocs = AtLeast(200);
    for (int i = 0; i < numDocs; i++)
    {
        Document doc = new Document();
        doc.Add(NewStringField("id", "" + i, Field.Store.NO));
        doc.Add(NewTextField("content", "aaa " + i, Field.Store.NO));
        w.AddDocument(doc);
    }

    w.ForceMerge(1);
    IndexReader r = w.GetReader();
    Assert.AreEqual(numDocs, r.MaxDoc);
    Assert.AreEqual(numDocs, r.NumDocs);
    r.Dispose();

    if (VERBOSE)
    {
        Console.WriteLine("\nTEST: delete doc");
    }
    w.DeleteDocuments(new Term("id", "" + (42 + 17)));

    r = w.GetReader();
    Assert.AreEqual(numDocs, r.MaxDoc);
    Assert.AreEqual(numDocs - 1, r.NumDocs);
    r.Dispose();

    w.ForceMergeDeletes();
    r = w.GetReader();
    Assert.AreEqual(numDocs - 1, r.MaxDoc);
    Assert.AreEqual(numDocs - 1, r.NumDocs);
    r.Dispose();

    w.Dispose();
    dir.Dispose();
}
public virtual void TestSumDocFreq_Mem()
{
    int numDocs = AtLeast(500);
    Directory dir = NewDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
        this,
#endif
        Random, dir);
    Document doc = new Document();
    Field id = NewStringField("id", "", Field.Store.NO);
    Field field1 = NewTextField("foo", "", Field.Store.NO);
    Field field2 = NewTextField("bar", "", Field.Store.NO);
    doc.Add(id);
    doc.Add(field1);
    doc.Add(field2);
    for (int i = 0; i < numDocs; i++)
    {
        id.SetStringValue("" + i);
        char ch1 = (char)TestUtil.NextInt32(Random, 'a', 'z');
        char ch2 = (char)TestUtil.NextInt32(Random, 'a', 'z');
        field1.SetStringValue("" + ch1 + " " + ch2);
        ch1 = (char)TestUtil.NextInt32(Random, 'a', 'z');
        ch2 = (char)TestUtil.NextInt32(Random, 'a', 'z');
        field2.SetStringValue("" + ch1 + " " + ch2);
        writer.AddDocument(doc);
    }

    IndexReader ir = writer.GetReader();
    AssertSumDocFreq(ir);
    ir.Dispose();

    int numDeletions = AtLeast(20);
    for (int i = 0; i < numDeletions; i++)
    {
        writer.DeleteDocuments(new Term("id", "" + Random.Next(numDocs)));
    }
    writer.ForceMerge(1);
    writer.Dispose();

    ir = DirectoryReader.Open(dir);
    AssertSumDocFreq(ir);
    ir.Dispose();
    dir.Dispose();
}
public virtual void TestMerge()
{
    RandomDocumentFactory docFactory = new RandomDocumentFactory(this, 5, 20);
    int numDocs = AtLeast(100);
    int numDeletes = Random.Next(numDocs);
    HashSet<int?> deletes = new HashSet<int?>();
    while (deletes.Count < numDeletes)
    {
        deletes.Add(Random.Next(numDocs));
    }
    foreach (Options options in ValidOptions())
    {
        RandomDocument[] docs = new RandomDocument[numDocs];
        for (int i = 0; i < numDocs; ++i)
        {
            docs[i] = docFactory.NewDocument(TestUtil.NextInt32(Random, 1, 3), AtLeast(10), options);
        }
        using (Directory dir = NewDirectory())
        using (RandomIndexWriter writer = new RandomIndexWriter(Random, dir, ClassEnvRule.similarity, ClassEnvRule.timeZone))
        {
            for (int i = 0; i < numDocs; ++i)
            {
                writer.AddDocument(AddId(docs[i].ToDocument(), "" + i));
                if (Rarely())
                {
                    writer.Commit();
                }
            }
            foreach (int delete in deletes)
            {
                writer.DeleteDocuments(new Term("id", "" + delete));
            }
            // merge with deletes
            writer.ForceMerge(1);
            using (IndexReader reader = writer.GetReader())
            {
                for (int i = 0; i < numDocs; ++i)
                {
                    if (!deletes.Contains(i))
                    {
                        int docID = DocID(reader, "" + i);
                        AssertEquals(docs[i], reader.GetTermVectors(docID));
                    }
                }
            }
        } // reader, writer, and dir are all disposed by the using blocks
    }
}
public virtual void TestSumDocFreq_Mem()
{
    int numDocs = AtLeast(500);
    Directory dir = NewDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(Random(), dir, Similarity, TimeZone);
    Document doc = new Document();
    Field id = NewStringField("id", "", Field.Store.NO);
    Field field1 = NewTextField("foo", "", Field.Store.NO);
    Field field2 = NewTextField("bar", "", Field.Store.NO);
    doc.Add(id);
    doc.Add(field1);
    doc.Add(field2);
    for (int i = 0; i < numDocs; i++)
    {
        id.SetStringValue("" + i);
        char ch1 = (char)TestUtil.NextInt(Random(), 'a', 'z');
        char ch2 = (char)TestUtil.NextInt(Random(), 'a', 'z');
        field1.SetStringValue("" + ch1 + " " + ch2);
        ch1 = (char)TestUtil.NextInt(Random(), 'a', 'z');
        ch2 = (char)TestUtil.NextInt(Random(), 'a', 'z');
        field2.SetStringValue("" + ch1 + " " + ch2);
        writer.AddDocument(doc);
    }

    IndexReader ir = writer.Reader;
    AssertSumDocFreq(ir);
    ir.Dispose();

    int numDeletions = AtLeast(20);
    for (int i = 0; i < numDeletions; i++)
    {
        writer.DeleteDocuments(new Term("id", "" + Random().Next(numDocs)));
    }
    writer.ForceMerge(1);
    writer.Dispose();

    ir = DirectoryReader.Open(dir);
    AssertSumDocFreq(ir);
    ir.Dispose();
    dir.Dispose();
}
public virtual void TestDeleteByTermIsCurrent()
{
    // get reader
    DirectoryReader reader = Writer.GetReader();

    // assert index has a document and reader is up to date
    Assert.AreEqual(1, Writer.NumDocs, "One document should be in the index");
    Assert.IsTrue(reader.IsCurrent(), "One document added, reader should be current");

    // remove document
    Term idTerm = new Term("UUID", "1");
    Writer.DeleteDocuments(idTerm);
    Writer.Commit();

    // assert document has been deleted (index changed), reader is stale
    Assert.AreEqual(0, Writer.NumDocs, "Document should be removed");
    Assert.IsFalse(reader.IsCurrent(), "Reader should be stale");

    reader.Dispose();
}
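// For reference: a minimal, self-contained sketch of the delete-by-term /
// stale-reader pattern the test above exercises. This is an illustrative
// example, not part of the test suite; it assumes the Lucene.NET 4.8 API
// (RAMDirectory, StandardAnalyzer, DirectoryReader.OpenIfChanged).
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Lucene.Net.Util;

public static class DeleteByTermSketch
{
    public static void Run()
    {
        using (var dir = new RAMDirectory())
        using (var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48))
        using (var writer = new IndexWriter(dir, new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)))
        {
            var doc = new Document();
            doc.Add(new StringField("id", "1", Field.Store.YES));
            writer.AddDocument(doc);
            writer.Commit();

            using (DirectoryReader reader = DirectoryReader.Open(dir))
            {
                writer.DeleteDocuments(new Term("id", "1")); // mark matching docs as deleted
                writer.Commit();                             // make the deletion visible

                // 'reader' still reflects the old commit; reopen to observe the change.
                DirectoryReader fresh = DirectoryReader.OpenIfChanged(reader);
                fresh?.Dispose(); // non-null here because the index changed after 'reader' was opened
            }
        }
    }
}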
public override void Run()
{
    try
    {
        while (Operations.Get() > 0)
        {
            int oper = rand.Next(100);

            if (oper < CommitPercent)
            {
                if (NumCommitting.IncrementAndGet() <= MaxConcurrentCommits)
                {
                    IDictionary<int, long> newCommittedModel;
                    long version;
                    DirectoryReader oldReader;

                    lock (OuterInstance)
                    {
                        newCommittedModel = new Dictionary<int, long>(OuterInstance.Model); // take a snapshot
                        version = OuterInstance.SnapshotCount++;
                        oldReader = OuterInstance.Reader;
                        oldReader.IncRef(); // increment the reference since we will use this for reopening
                    }

                    DirectoryReader newReader;
                    if (rand.Next(100) < SoftCommitPercent)
                    {
                        // assertU(h.Commit("softCommit","true"));
                        if (Random().NextBoolean())
                        {
                            if (VERBOSE)
                            {
                                Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": call writer.getReader");
                            }
                            newReader = Writer.GetReader(true);
                        }
                        else
                        {
                            if (VERBOSE)
                            {
                                Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": reopen reader=" + oldReader + " version=" + version);
                            }
                            newReader = DirectoryReader.OpenIfChanged(oldReader, Writer.w, true);
                        }
                    }
                    else
                    {
                        // assertU(commit());
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": commit+reopen reader=" + oldReader + " version=" + version);
                        }
                        Writer.Commit();
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": now reopen after commit");
                        }
                        newReader = DirectoryReader.OpenIfChanged(oldReader);
                    }

                    // Code below assumes newReader comes w/ extra ref:
                    if (newReader == null)
                    {
                        oldReader.IncRef();
                        newReader = oldReader;
                    }
                    oldReader.DecRef();

                    lock (OuterInstance)
                    {
                        // install the new reader if it's newest (and check the current version
                        // since another reader may have already been installed)
                        //System.out.println(Thread.currentThread().getName() + ": newVersion=" + newReader.getVersion());
                        Debug.Assert(newReader.RefCount > 0);
                        Debug.Assert(OuterInstance.Reader.RefCount > 0);
                        if (newReader.Version > OuterInstance.Reader.Version)
                        {
                            if (VERBOSE)
                            {
                                Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": install new reader=" + newReader);
                            }
                            OuterInstance.Reader.DecRef();
                            OuterInstance.Reader = newReader;

                            // Silly: forces fieldInfos to be loaded so we don't hit IOE on later reader.toString
                            newReader.ToString();

                            // install this snapshot only if it's newer than the current one
                            if (version >= OuterInstance.CommittedModelClock)
                            {
                                if (VERBOSE)
                                {
                                    Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": install new model version=" + version);
                                }
                                OuterInstance.CommittedModel = newCommittedModel;
                                OuterInstance.CommittedModelClock = version;
                            }
                            else
                            {
                                if (VERBOSE)
                                {
                                    Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": skip install new model version=" + version);
                                }
                            }
                        }
                        else
                        {
                            // if the same reader, don't decRef.
                            if (VERBOSE)
                            {
                                Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": skip install new reader=" + newReader);
                            }
                            newReader.DecRef();
                        }
                    }
                }
                NumCommitting.DecrementAndGet();
            }
            else
            {
                int id = rand.Next(Ndocs);
                object sync = OuterInstance.SyncArr[id];

                // set the lastId before we actually change it sometimes to try and
                // uncover more race conditions between writing and reading
                bool before = Random().NextBoolean();
                if (before)
                {
                    OuterInstance.LastId = id;
                }

                // We can't concurrently update the same document and retain our invariants of
                // increasing values, since we can't guarantee what order the updates will be executed.
                lock (sync)
                {
                    long val = OuterInstance.Model[id];
                    long nextVal = Math.Abs(val) + 1;

                    if (oper < CommitPercent + DeletePercent)
                    {
                        // assertU("<delete><id>" + id + "</id></delete>");

                        // add tombstone first
                        if (Tombstones)
                        {
                            Document d = new Document();
                            d.Add(OuterInstance.NewStringField("id", "-" + Convert.ToString(id), Documents.Field.Store.YES));
                            d.Add(OuterInstance.NewField(OuterInstance.Field, Convert.ToString(nextVal), StoredOnlyType));
                            Writer.UpdateDocument(new Term("id", "-" + Convert.ToString(id)), d);
                        }

                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": term delDocs id:" + id + " nextVal=" + nextVal);
                        }
                        Writer.DeleteDocuments(new Term("id", Convert.ToString(id)));
                        OuterInstance.Model[id] = -nextVal;
                    }
                    else if (oper < CommitPercent + DeletePercent + DeleteByQueryPercent)
                    {
                        //assertU("<delete><query>id:" + id + "</query></delete>");

                        // add tombstone first
                        if (Tombstones)
                        {
                            Document d = new Document();
                            d.Add(OuterInstance.NewStringField("id", "-" + Convert.ToString(id), Documents.Field.Store.YES));
                            d.Add(OuterInstance.NewField(OuterInstance.Field, Convert.ToString(nextVal), StoredOnlyType));
                            Writer.UpdateDocument(new Term("id", "-" + Convert.ToString(id)), d);
                        }

                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": query delDocs id:" + id + " nextVal=" + nextVal);
                        }
                        Writer.DeleteDocuments(new TermQuery(new Term("id", Convert.ToString(id))));
                        OuterInstance.Model[id] = -nextVal;
                    }
                    else
                    {
                        // assertU(adoc("id",Integer.toString(id), field, Long.toString(nextVal)));
                        Document d = new Document();
                        d.Add(OuterInstance.NewStringField("id", Convert.ToString(id), Documents.Field.Store.YES));
                        d.Add(OuterInstance.NewField(OuterInstance.Field, Convert.ToString(nextVal), StoredOnlyType));
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": u id:" + id + " val=" + nextVal);
                        }
                        Writer.UpdateDocument(new Term("id", Convert.ToString(id)), d);

                        if (Tombstones)
                        {
                            // remove tombstone after new addition (this should be optional?)
                            Writer.DeleteDocuments(new Term("id", "-" + Convert.ToString(id)));
                        }
                        OuterInstance.Model[id] = nextVal;
                    }
                }

                if (!before)
                {
                    OuterInstance.LastId = id;
                }
            }
        }
    }
    catch (Exception e)
    {
        Console.WriteLine(Thread.CurrentThread.Name + ": FAILED: unexpected exception");
        Console.WriteLine(e.StackTrace);
        throw new Exception(e.Message, e);
    }
}
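// For reference: a minimal hedged sketch of the two deletion flavors the
// stress test above exercises. The 'writer' name is illustrative; this
// assumes the Lucene.NET 4.8 IndexWriter API.

// Delete-by-term: removes every document whose indexed "id" term is "42".
writer.DeleteDocuments(new Term("id", "42"));

// Delete-by-query: removes every document matching an arbitrary query.
writer.DeleteDocuments(new TermQuery(new Term("id", "42")));

// Neither deletion is visible to readers until a commit (or an NRT reopen).
writer.Commit();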
public virtual void TestPostings()
{
    Directory dir = NewFSDirectory(CreateTempDir("postings"));
    IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    iwc.SetCodec(Codec.ForName("Lucene40"));
    RandomIndexWriter iw = new RandomIndexWriter(Random(), dir, iwc);
    Document doc = new Document();

    // id field
    FieldType idType = new FieldType(StringField.TYPE_NOT_STORED);
    idType.StoreTermVectors = true;
    Field idField = new Field("id", "", idType);
    doc.Add(idField);

    // title field: short text field
    FieldType titleType = new FieldType(TextField.TYPE_NOT_STORED);
    titleType.StoreTermVectors = true;
    titleType.StoreTermVectorPositions = true;
    titleType.StoreTermVectorOffsets = true;
    titleType.IndexOptions = IndexOptions();
    Field titleField = new Field("title", "", titleType);
    doc.Add(titleField);

    // body field: long text field
    FieldType bodyType = new FieldType(TextField.TYPE_NOT_STORED);
    bodyType.StoreTermVectors = true;
    bodyType.StoreTermVectorPositions = true;
    bodyType.StoreTermVectorOffsets = true;
    bodyType.IndexOptions = IndexOptions();
    Field bodyField = new Field("body", "", bodyType);
    doc.Add(bodyField);

    int numDocs = AtLeast(1000);
    for (int i = 0; i < numDocs; i++)
    {
        idField.StringValue = Convert.ToString(i);
        titleField.StringValue = FieldValue(1);
        bodyField.StringValue = FieldValue(3);
        iw.AddDocument(doc);
        if (Random().Next(20) == 0)
        {
            iw.DeleteDocuments(new Term("id", Convert.ToString(i)));
        }
    }
    if (Random().NextBoolean())
    {
        // delete 1-100% of docs
        iw.DeleteDocuments(new Term("title", Terms[Random().Next(Terms.Length)]));
    }
    iw.Dispose();
    dir.Dispose(); // checkindex
}
// [Test] // LUCENENET NOTE: For now, we are overriding this test in every subclass to pull it into the right context for the subclass
public virtual void TestRandomStoredFields()
{
    Directory dir = NewDirectory();
    Random rand = Random();
    RandomIndexWriter w = new RandomIndexWriter(rand, dir,
        NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
            .SetMaxBufferedDocs(TestUtil.NextInt(rand, 5, 20)));
    //w.w.setNoCFSRatio(0.0);
    int docCount = AtLeast(200);
    int fieldCount = TestUtil.NextInt(rand, 1, 5);
    IList<int?> fieldIDs = new List<int?>();

    FieldType customType = new FieldType(TextField.TYPE_STORED);
    customType.IsTokenized = false;
    Field idField = NewField("id", "", customType);

    for (int i = 0; i < fieldCount; i++)
    {
        fieldIDs.Add(i);
    }

    IDictionary<string, Document> docs = new Dictionary<string, Document>();

    if (VERBOSE)
    {
        Console.WriteLine("TEST: build index docCount=" + docCount);
    }

    FieldType customType2 = new FieldType();
    customType2.IsStored = true;
    for (int i = 0; i < docCount; i++)
    {
        Document doc = new Document();
        doc.Add(idField);
        string id = "" + i;
        idField.SetStringValue(id);
        docs[id] = doc;
        if (VERBOSE)
        {
            Console.WriteLine("TEST: add doc id=" + id);
        }

        foreach (int field in fieldIDs)
        {
            string s;
            if (rand.Next(4) != 3)
            {
                s = TestUtil.RandomUnicodeString(rand, 1000);
                doc.Add(NewField("f" + field, s, customType2));
            }
            else
            {
                s = null;
            }
        }
        w.AddDocument(doc);
        if (rand.Next(50) == 17)
        {
            // mixup binding of field name -> Number every so often
            Collections.Shuffle(fieldIDs);
        }
        if (rand.Next(5) == 3 && i > 0)
        {
            string delID = "" + rand.Next(i);
            if (VERBOSE)
            {
                Console.WriteLine("TEST: delete doc id=" + delID);
            }
            w.DeleteDocuments(new Term("id", delID));
            docs.Remove(delID);
        }
    }

    if (VERBOSE)
    {
        Console.WriteLine("TEST: " + docs.Count + " docs in index; now load fields");
    }
    if (docs.Count > 0)
    {
        string[] idsList = docs.Keys.ToArray();

        for (int x = 0; x < 2; x++)
        {
            IndexReader r = w.Reader;
            IndexSearcher s = NewSearcher(r);

            if (VERBOSE)
            {
                Console.WriteLine("TEST: cycle x=" + x + " r=" + r);
            }

            int num = AtLeast(1000);
            for (int iter = 0; iter < num; iter++)
            {
                string testID = idsList[rand.Next(idsList.Length)];
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: test id=" + testID);
                }
                TopDocs hits = s.Search(new TermQuery(new Term("id", testID)), 1);
                Assert.AreEqual(1, hits.TotalHits);
                Document doc = r.Document(hits.ScoreDocs[0].Doc);
                Document docExp = docs[testID];
                for (int i = 0; i < fieldCount; i++)
                {
                    assertEquals("doc " + testID + ", field f" + i + " is wrong", docExp.Get("f" + i), doc.Get("f" + i));
                }
            }
            r.Dispose();
            w.ForceMerge(1);
        }
    }
    w.Dispose();
    dir.Dispose();
}
public override void SetUp()
{
    base.SetUp();
    Dir = NewDirectory();
    Iw = new RandomIndexWriter(Random(), Dir);
    Document doc = new Document();
    Field idField = new StringField("id", "", Field.Store.NO);
    doc.Add(idField);

    // add 500 docs with id 0..499
    for (int i = 0; i < 500; i++)
    {
        idField.StringValue = Convert.ToString(i);
        Iw.AddDocument(doc);
    }
    // delete 20 of them
    for (int i = 0; i < 20; i++)
    {
        Iw.DeleteDocuments(new Term("id", Convert.ToString(Random().Next(Iw.MaxDoc()))));
    }
    Ir = Iw.Reader;
    @is = NewSearcher(Ir);
}
public virtual void TestEnforceDeletions()
{
    Directory dir = NewDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(Random(), dir,
        NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
            .SetMergeScheduler(new SerialMergeScheduler())
            .SetMergePolicy(NewLogMergePolicy(10))); // asserts below require no unexpected merges

    // NOTE: cannot use writer.getReader because RIW (on
    // flipping a coin) may give us a newly opened reader,
    // but we use .reopen on this reader below and expect to
    // (must) get an NRT reader:
    DirectoryReader reader = DirectoryReader.Open(writer.w, true);
    // same reason we don't wrap?
    IndexSearcher searcher = NewSearcher(reader, false);

    // add a doc, refresh the reader, and check that it's there
    Document doc = new Document();
    doc.Add(NewStringField("id", "1", Field.Store.YES));
    writer.AddDocument(doc);

    reader = RefreshReader(reader);
    searcher = NewSearcher(reader, false);

    TopDocs docs = searcher.Search(new MatchAllDocsQuery(), 1);
    Assert.AreEqual(1, docs.TotalHits, "Should find a hit...");

    Filter startFilter = new QueryWrapperFilter(new TermQuery(new Term("id", "1")));

    CachingWrapperFilter filter = new CachingWrapperFilter(startFilter);

    docs = searcher.Search(new MatchAllDocsQuery(), filter, 1);
    Assert.IsTrue(filter.SizeInBytes() > 0);
    Assert.AreEqual(1, docs.TotalHits, "[query + filter] Should find a hit...");

    Query constantScore = new ConstantScoreQuery(filter);
    docs = searcher.Search(constantScore, 1);
    Assert.AreEqual(1, docs.TotalHits, "[just filter] Should find a hit...");

    // make sure we get a cache hit when we reopen a reader
    // that had no change to deletions

    // fake delete (deletes nothing):
    writer.DeleteDocuments(new Term("foo", "bar"));

    IndexReader oldReader = reader;
    reader = RefreshReader(reader);
    Assert.IsTrue(reader == oldReader);
    int missCount = filter.MissCount;
    docs = searcher.Search(constantScore, 1);
    Assert.AreEqual(1, docs.TotalHits, "[just filter] Should find a hit...");

    // cache hit:
    Assert.AreEqual(missCount, filter.MissCount);

    // now delete the doc, refresh the reader, and see that it's not there
    writer.DeleteDocuments(new Term("id", "1"));

    // NOTE: important to hold ref here so GC doesn't clear
    // the cache entry! Else the assert below may sometimes fail:
    oldReader = reader;
    reader = RefreshReader(reader);
    searcher = NewSearcher(reader, false);

    missCount = filter.MissCount;
    docs = searcher.Search(new MatchAllDocsQuery(), filter, 1);
    Assert.AreEqual(0, docs.TotalHits, "[query + filter] Should *not* find a hit...");

    // cache hit
    Assert.AreEqual(missCount, filter.MissCount);
    docs = searcher.Search(constantScore, 1);
    Assert.AreEqual(0, docs.TotalHits, "[just filter] Should *not* find a hit...");

    // apply deletes dynamically:
    filter = new CachingWrapperFilter(startFilter);
    writer.AddDocument(doc);
    reader = RefreshReader(reader);
    searcher = NewSearcher(reader, false);

    docs = searcher.Search(new MatchAllDocsQuery(), filter, 1);
    Assert.AreEqual(1, docs.TotalHits, "[query + filter] Should find a hit...");
    missCount = filter.MissCount;
    Assert.IsTrue(missCount > 0);
    constantScore = new ConstantScoreQuery(filter);
    docs = searcher.Search(constantScore, 1);
    Assert.AreEqual(1, docs.TotalHits, "[just filter] Should find a hit...");
    Assert.AreEqual(missCount, filter.MissCount);

    writer.AddDocument(doc);

    // NOTE: important to hold ref here so GC doesn't clear
    // the cache entry! Else the assert below may sometimes fail:
    oldReader = reader;

    reader = RefreshReader(reader);
    searcher = NewSearcher(reader, false);

    docs = searcher.Search(new MatchAllDocsQuery(), filter, 1);
    Assert.AreEqual(2, docs.TotalHits, "[query + filter] Should find 2 hits...");
    Assert.IsTrue(filter.MissCount > missCount);
    missCount = filter.MissCount;

    constantScore = new ConstantScoreQuery(filter);
    docs = searcher.Search(constantScore, 1);
    Assert.AreEqual(2, docs.TotalHits, "[just filter] Should find a hit...");
    Assert.AreEqual(missCount, filter.MissCount);

    // now delete the doc, refresh the reader, and see that it's not there
    writer.DeleteDocuments(new Term("id", "1"));

    reader = RefreshReader(reader);
    searcher = NewSearcher(reader, false);

    docs = searcher.Search(new MatchAllDocsQuery(), filter, 1);
    Assert.AreEqual(0, docs.TotalHits, "[query + filter] Should *not* find a hit...");
    // CWF reused the same entry (it dynamically applied the deletes):
    Assert.AreEqual(missCount, filter.MissCount);

    docs = searcher.Search(constantScore, 1);
    Assert.AreEqual(0, docs.TotalHits, "[just filter] Should *not* find a hit...");
    // CWF reused the same entry (it dynamically applied the deletes):
    Assert.AreEqual(missCount, filter.MissCount);

    // NOTE: silliness to make sure JRE does not eliminate
    // our holding onto oldReader to prevent
    // CachingWrapperFilter's WeakHashMap from dropping the
    // entry:
    Assert.IsTrue(oldReader != null);

    reader.Dispose();
    writer.Dispose();
    dir.Dispose();
}
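// For reference: a minimal hedged sketch of the CachingWrapperFilter pattern
// the test above exercises (names are illustrative and 'searcher' is assumed
// to be in scope; assumes the Lucene.NET 4.8 search API). The wrapper caches
// the filter's DocIdSet per segment, so repeated searches against an
// unchanged reader hit the cache instead of re-evaluating the inner query.
Filter baseFilter = new QueryWrapperFilter(new TermQuery(new Term("id", "1")));
Filter cachedFilter = new CachingWrapperFilter(baseFilter);

// The first search computes and caches the DocIdSet; later searches reuse it
// until the underlying reader (segment) changes.
TopDocs hits = searcher.Search(new MatchAllDocsQuery(), cachedFilter, 10);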