private void DoTestMissingVsFieldCache(LongProducer longs)
{
    // Indexes random docs where an "indexed" string field and a "dv" numeric
    // doc-values field are populated in lockstep (and omitted together ~25% of
    // the time), then checks that FieldCache reports identical docs-with-field
    // bits for both fields on every leaf reader.
    AssumeTrue("Codec does not support GetDocsWithField", DefaultCodecSupportsDocsWithField());

    Directory dir = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    RandomIndexWriter writer = new RandomIndexWriter(Random(), dir, conf);

    // Reusable field instances; values are overwritten per document.
    Field id = new StringField("id", "", Field.Store.NO);
    Field indexed = NewStringField("indexed", "", Field.Store.NO);
    Field dv = new NumericDocValuesField("dv", 0);

    // numDocs should be always > 256 so that in case of a codec that optimizes
    // for numbers of values <= 256, all storage layouts are tested
    int docCount = AtLeast(300);
    Debug.Assert(docCount > 256);

    for (int docNum = 0; docNum < docCount; docNum++)
    {
        id.StringValue = Convert.ToString(docNum);
        long next = longs.Next();
        indexed.StringValue = Convert.ToString(next);
        dv.LongValue = next;

        Document doc = new Document();
        doc.Add(id);
        // 1/4 of the time we neglect to add the fields
        if (Random().Next(4) > 0)
        {
            doc.Add(indexed);
            doc.Add(dv);
        }
        writer.AddDocument(doc);

        // Occasionally commit so multiple segments get exercised.
        if (Random().Next(31) == 0)
        {
            writer.Commit();
        }
    }

    // Delete a random subset of docs (possibly none).
    int deleteCount = Random().Next(docCount / 10);
    for (int d = 0; d < deleteCount; d++)
    {
        int victim = Random().Next(docCount);
        writer.DeleteDocuments(new Term("id", Convert.ToString(victim)));
    }

    // Merge down so at least one segment holds more than 256 values.
    writer.ForceMerge(docCount / 256);
    writer.Dispose();

    // Compare: for each leaf, the docs-with-field bits must agree.
    DirectoryReader ir = DirectoryReader.Open(dir);
    foreach (AtomicReaderContext leafCtx in ir.Leaves)
    {
        AtomicReader leaf = leafCtx.AtomicReader;
        Bits expected = FieldCache.DEFAULT.GetDocsWithField(leaf, "indexed");
        Bits actual = FieldCache.DEFAULT.GetDocsWithField(leaf, "dv");
        AssertEquals(expected, actual);
    }
    ir.Dispose();
    dir.Dispose();
}
private void DoTestNumericsVsStoredFields(LongProducer longs)
{
    // Indexes random docs that carry the same produced long both as a stored
    // string field and as a numeric doc-values field, then verifies the two
    // representations agree for every document in every leaf reader.
    Directory dir = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    RandomIndexWriter writer = new RandomIndexWriter(Random(), dir, conf);

    // One reusable Document/field set; values are overwritten per iteration.
    Document doc = new Document();
    Field id = new StringField("id", "", Field.Store.NO);
    Field stored = NewStringField("stored", "", Field.Store.YES);
    Field dv = new NumericDocValuesField("dv", 0);
    doc.Add(id);
    doc.Add(stored);
    doc.Add(dv);

    // numDocs should be always > 256 so that in case of a codec that optimizes
    // for numbers of values <= 256, all storage layouts are tested
    int docCount = AtLeast(300);
    Debug.Assert(docCount > 256);

    for (int docNum = 0; docNum < docCount; docNum++)
    {
        id.StringValue = Convert.ToString(docNum);
        long next = longs.Next();
        stored.StringValue = Convert.ToString(next);
        dv.LongValue = next;
        writer.AddDocument(doc);

        // Occasionally commit so multiple segments get exercised.
        if (Random().Next(31) == 0)
        {
            writer.Commit();
        }
    }

    // Delete a random subset of docs (possibly none).
    int deleteCount = Random().Next(docCount / 10);
    for (int d = 0; d < deleteCount; d++)
    {
        int victim = Random().Next(docCount);
        writer.DeleteDocuments(new Term("id", Convert.ToString(victim)));
    }

    // Merge down so at least one segment holds more than 256 values.
    writer.ForceMerge(docCount / 256);
    writer.Dispose();

    // Compare: doc values must match the stored-field copies, doc by doc.
    DirectoryReader ir = DirectoryReader.Open(dir);
    foreach (AtomicReaderContext leafCtx in ir.Leaves)
    {
        AtomicReader leaf = leafCtx.AtomicReader;
        NumericDocValues values = leaf.GetNumericDocValues("dv");
        for (int docId = 0; docId < leaf.MaxDoc; docId++)
        {
            long storedValue = Convert.ToInt64(leaf.Document(docId).Get("stored"));
            Assert.AreEqual(storedValue, values.Get(docId));
        }
    }
    ir.Dispose();
    dir.Dispose();
}