Example #1
        public virtual void TestForceFlush()
        {
            Directory dir = NewDirectory();

            LogDocMergePolicy mp = new LogDocMergePolicy();

            mp.MinMergeDocs = 100;
            mp.MergeFactor  = 10;
            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10).SetMergePolicy(mp));

            for (int i = 0; i < 100; i++)
            {
                AddDoc(writer);
                writer.Dispose();

                mp              = new LogDocMergePolicy();
                mp.MergeFactor  = 10;
                writer          = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.APPEND).SetMaxBufferedDocs(10).SetMergePolicy(mp));
                mp.MinMergeDocs = 100;
                CheckInvariants(writer);
            }

            writer.Dispose();
            dir.Dispose();
        }
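Note: the AddDoc and CheckInvariants helpers are not shown on this page. CheckInvariants waits for merges and asserts the LogDocMergePolicy segment-count invariants; AddDoc adds one minimal document. A plausible sketch of AddDoc, modeled on Lucene.NET's merge-policy tests (the body is an assumption, not taken from this page):

        // Assumed helper: adds a single tiny document, so flushes and merges
        // are driven purely by document counts.
        private void AddDoc(IndexWriter writer)
        {
            Document doc = new Document();
            doc.Add(NewTextField("content", "aaa", Field.Store.NO));
            writer.AddDocument(doc);
        }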
Example #2
        public virtual void TestForceFlush()
        {
            Directory dir = new RAMDirectory();

            IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);

            writer.SetMaxBufferedDocs(10);
            writer.SetMergeFactor(10);
            LogDocMergePolicy mp = new LogDocMergePolicy(writer);

            mp.SetMinMergeDocs(100);
            writer.SetMergePolicy(mp);

            for (int i = 0; i < 100; i++)
            {
                AddDoc(writer);
                writer.Close();

                writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
                writer.SetMaxBufferedDocs(10);
                writer.SetMergePolicy(mp);
                mp.SetMinMergeDocs(100);
                writer.SetMergeFactor(10);
                CheckInvariants(writer);
            }

            writer.Close();
        }
Example #3
        public virtual void TestOneLargeOneSmall()
        {
            Directory dir = new RAMDirectory();

            IndexWriterConfig conf   = NewWriterConfig();
            IndexWriter       writer = new IndexWriter(dir, conf);

            AddDocs(writer, 3);
            AddDocs(writer, 5);
            AddDocs(writer, 3);
            AddDocs(writer, 5);

            writer.Dispose();

            conf = NewWriterConfig();
            LogMergePolicy lmp = new LogDocMergePolicy();

            lmp.MaxMergeDocs = 3;
            conf.SetMergePolicy(lmp);

            writer = new IndexWriter(dir, conf);
            writer.ForceMerge(1);
            writer.Dispose();

            SegmentInfos sis = new SegmentInfos();

            sis.Read(dir);
            Assert.AreEqual(4, sis.Size());
        }
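Note: NewWriterConfig and AddDocs belong to the surrounding test class (these examples mirror Lucene's TestSizeBoundedForceMerge) and are not shown here. A plausible sketch of both, with bodies assumed from that test rather than taken from this page:

        // Assumed helper: a config that never flushes or merges on its own,
        // so the test controls the segment layout completely.
        private IndexWriterConfig NewWriterConfig()
        {
            IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, null);
            conf.SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
            // Prevent background merges; the tests call ForceMerge explicitly.
            conf.SetMergePolicy(NoMergePolicy.COMPOUND_FILES);
            return conf;
        }

        // Assumed helper: each call commits, producing one segment with
        // numDocs documents; withID stores an "id" field that the
        // deletion-based tests below target.
        private void AddDocs(IndexWriter writer, int numDocs, bool withID = false)
        {
            for (int i = 0; i < numDocs; i++)
            {
                Document doc = new Document();
                if (withID)
                {
                    doc.Add(new StringField("id", "" + i, Field.Store.NO));
                }
                writer.AddDocument(doc);
            }
            writer.Commit();
        }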
Example #4
        public virtual void TestNumDocsLimit()
        {
            // tests that the max merge docs constraint is applied during forceMerge.
            Directory dir = new RAMDirectory();

            // Prepare an index w/ several small segments and a large one.
            IndexWriterConfig conf   = NewWriterConfig();
            IndexWriter       writer = new IndexWriter(dir, conf);

            AddDocs(writer, 3);
            AddDocs(writer, 3);
            AddDocs(writer, 5);
            AddDocs(writer, 3);
            AddDocs(writer, 3);
            AddDocs(writer, 3);
            AddDocs(writer, 3);

            writer.Dispose();

            conf = NewWriterConfig();
            LogMergePolicy lmp = new LogDocMergePolicy();

            lmp.MaxMergeDocs = 3;
            conf.SetMergePolicy(lmp);

            writer = new IndexWriter(dir, conf);
            writer.ForceMerge(1);
            writer.Dispose();

            // Should be only 3 segments in the index: the 5-doc segment exceeds MaxMergeDocs,
            // so the 3-doc segments on either side of it merge into one segment each.
            SegmentInfos sis = new SegmentInfos();

            sis.Read(dir);
            Assert.AreEqual(3, sis.Size());
        }
Example #5
        public virtual void TestSingleMergeableSegment()
        {
            Directory dir = new RAMDirectory();

            IndexWriterConfig conf   = NewWriterConfig();
            IndexWriter       writer = new IndexWriter(dir, conf);

            AddDocs(writer, 3);
            AddDocs(writer, 5);
            AddDocs(writer, 3);

            // delete the last document, so that the last segment is merged.
            writer.DeleteDocuments(new Term("id", "10"));
            writer.Dispose();

            conf = NewWriterConfig();
            LogMergePolicy lmp = new LogDocMergePolicy();

            lmp.MaxMergeDocs = 3;
            conf.SetMergePolicy(lmp);

            writer = new IndexWriter(dir, conf);
            writer.ForceMerge(1);
            writer.Dispose();

            // Verify that the last segment does not have deletions.
            SegmentInfos sis = new SegmentInfos();

            sis.Read(dir);
            Assert.AreEqual(3, sis.Count);
            Assert.IsFalse(sis.Info(2).HasDeletions);
        }
Example #6
        public virtual void TestMergeFactor()
        {
            Directory dir = new RAMDirectory();

            IndexWriterConfig conf   = NewWriterConfig();
            IndexWriter       writer = new IndexWriter(dir, conf);

            AddDocs(writer, 3);
            AddDocs(writer, 3);
            AddDocs(writer, 3);
            AddDocs(writer, 3);
            AddDocs(writer, 5);
            AddDocs(writer, 3);
            AddDocs(writer, 3);

            writer.Dispose();

            conf = NewWriterConfig();
            LogMergePolicy lmp = new LogDocMergePolicy();

            lmp.MaxMergeDocs = 3;
            lmp.MergeFactor  = 2;
            conf.SetMergePolicy(lmp);

            writer = new IndexWriter(dir, conf);
            writer.ForceMerge(1);
            writer.Dispose();

            // Should be only 4 segments: the 5-doc segment is too large to merge, and
            // MergeFactor=2 merges the remaining 3-doc segments two at a time (3+3, 3+3, 5, 3+3).
            SegmentInfos sis = new SegmentInfos();

            sis.Read(dir);
            Assert.AreEqual(4, sis.Size());
        }
Example #7
        public virtual void TestSingleNonMergeableSegment()
        {
            Directory dir = new RAMDirectory();

            IndexWriterConfig conf   = NewWriterConfig();
            IndexWriter       writer = new IndexWriter(dir, conf);

            AddDocs(writer, 3, true);

            writer.Dispose();

            conf = NewWriterConfig();
            LogMergePolicy lmp = new LogDocMergePolicy();

            lmp.MaxMergeDocs = 3;
            conf.SetMergePolicy(lmp);

            writer = new IndexWriter(dir, conf);
            writer.ForceMerge(1);
            writer.Dispose();

            // Verify that the index still contains the single original segment.
            SegmentInfos sis = new SegmentInfos();

            sis.Read(dir);
            Assert.AreEqual(1, sis.Size());
        }
Example #8
        public virtual void TestAllSegmentsSmall()
        {
            Directory dir = new RAMDirectory();

            IndexWriterConfig conf = NewWriterConfig();
            IndexWriter writer = new IndexWriter(dir, conf);

            AddDocs(writer, 3);
            AddDocs(writer, 3);
            AddDocs(writer, 3);
            AddDocs(writer, 3);

            writer.Dispose();

            conf = NewWriterConfig();
            LogMergePolicy lmp = new LogDocMergePolicy();
            lmp.MaxMergeDocs = 3;
            conf.SetMergePolicy(lmp);

            writer = new IndexWriter(dir, conf);
            writer.ForceMerge(1);
            writer.Dispose();

            SegmentInfos sis = new SegmentInfos();
            sis.Read(dir);
            Assert.AreEqual(1, sis.Size());
        }
Example #9
        public virtual void TestSingleMergeableTooLargeSegment()
        {
            Directory dir = new RAMDirectory();

            IndexWriterConfig conf   = NewWriterConfig();
            IndexWriter       writer = new IndexWriter(dir, conf);

            AddDocs(writer, 5, true);

            // delete the last document

            writer.DeleteDocuments(new Term("id", "4"));
            writer.Dispose();

            conf = NewWriterConfig();
            LogMergePolicy lmp = new LogDocMergePolicy();

            lmp.MaxMergeDocs = 2;
            conf.SetMergePolicy(lmp);

            writer = new IndexWriter(dir, conf);
            writer.ForceMerge(1);
            writer.Dispose();

            // Verify that the segment was not merged: it is still the only segment
            // and still carries its deletion.
            SegmentInfos sis = new SegmentInfos();

            sis.Read(dir);
            Assert.AreEqual(1, sis.Size());
            Assert.IsTrue(sis.Info(0).HasDeletions());
        }
Example #10
        public virtual void TestMaxNumSegments2([ValueSource(typeof(ConcurrentMergeSchedulerFactories), "Values")] Func<IConcurrentMergeScheduler> newScheduler)
        {
            Directory dir = NewDirectory();

            Document doc = new Document();

            doc.Add(NewStringField("content", "aaa", Field.Store.NO));

            LogDocMergePolicy ldmp = new LogDocMergePolicy();

            ldmp.MinMergeDocs = 1;
            ldmp.MergeFactor  = 4;
            var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random))
                         .SetMaxBufferedDocs(2)
                         .SetMergePolicy(ldmp)
                         .SetMergeScheduler(newScheduler());
            IndexWriter writer = new IndexWriter(dir, config);

            for (int iter = 0; iter < 10; iter++)
            {
                for (int i = 0; i < 19; i++)
                {
                    writer.AddDocument(doc);
                }

                writer.Commit();
                writer.WaitForMerges();
                writer.Commit();

                SegmentInfos sis = new SegmentInfos();
                sis.Read(dir);

                int segCount = sis.Count;
                writer.ForceMerge(7);
                writer.Commit();
                writer.WaitForMerges();

                sis = new SegmentInfos();
                sis.Read(dir);
                int optSegCount = sis.Count;

                if (segCount < 7)
                {
                    Assert.AreEqual(segCount, optSegCount);
                }
                else
                {
                    Assert.AreEqual(7, optSegCount, "seg: " + segCount);
                }
            }
            writer.Dispose();
            dir.Dispose();
        }
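Note: ConcurrentMergeSchedulerFactories (and the similar ConcurrentMergeSchedulers source used by TestMergeDocCount0 below) comes from the Lucene.NET test framework; NUnit runs the test once per value it yields. A rough sketch of the shape such a value source takes (everything beyond the class and member names visible above is an assumption):

        // Assumed shape of the NUnit value source: the test runs once per
        // factory in Values, each supplying a fresh merge scheduler.
        public static class ConcurrentMergeSchedulerFactories
        {
            public static readonly Func<IConcurrentMergeScheduler>[] Values =
            {
                () => new ConcurrentMergeScheduler()
                // the real framework may register additional scheduler factories
            };
        }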
Example #11
        public virtual void TestDeleteMerging()
        {
            Directory directory = NewDirectory();

            LogDocMergePolicy mp = new LogDocMergePolicy();

            // Force degenerate merging so we can get a mix of
            // merging of segments with and without deletes at the
            // start:
            mp.MinMergeDocs = 1000;
            IndexWriter writer = new IndexWriter(directory, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(mp));

            Document doc     = new Document();
            Field    idField = NewStringField("id", "", Field.Store.YES);

            doc.Add(idField);
            for (int i = 0; i < 10; i++)
            {
                if (VERBOSE)
                {
                    Console.WriteLine("\nTEST: cycle");
                }
                for (int j = 0; j < 100; j++)
                {
                    idField.StringValue = Convert.ToString(i * 100 + j);
                    writer.AddDocument(doc);
                }

                int delID = i;
                while (delID < 100 * (1 + i))
                {
                    if (VERBOSE)
                    {
                        Console.WriteLine("TEST: del " + delID);
                    }
                    writer.DeleteDocuments(new Term("id", "" + delID));
                    delID += 10;
                }

                writer.Commit();
            }

            writer.Dispose();
            IndexReader reader = DirectoryReader.Open(directory);

            // Verify that we did not lose any deletes...
            Assert.AreEqual(450, reader.NumDocs);
            reader.Dispose();
            directory.Dispose();
        }
Example #12
        public virtual void TestDeleteMerging()
        {
            RAMDirectory directory = new MockRAMDirectory();

            IndexWriter writer           = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
            ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();

            writer.SetMergeScheduler(cms);

            LogDocMergePolicy mp = new LogDocMergePolicy(writer);

            writer.SetMergePolicy(mp);

            // Force degenerate merging so we can get a mix of
            // merging of segments with and without deletes at the
            // start:
            mp.MinMergeDocs = 1000;

            Document doc     = new Document();
            Field    idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);

            doc.Add(idField);
            for (int i = 0; i < 10; i++)
            {
                for (int j = 0; j < 100; j++)
                {
                    idField.SetValue(System.Convert.ToString(i * 100 + j));
                    writer.AddDocument(doc);
                }

                int delID = i;
                while (delID < 100 * (1 + i))
                {
                    writer.DeleteDocuments(new Term("id", "" + delID));
                    delID += 10;
                }

                writer.Commit();
            }

            writer.Close();
            IndexReader reader = IndexReader.Open(directory, true);

            // Verify that we did not lose any deletes...
            Assert.AreEqual(450, reader.NumDocs());
            reader.Close();
            directory.Close();
        }
Example #13
        public virtual void TestDeleteMerging()
        {
            Directory directory = NewDirectory();

            LogDocMergePolicy mp = new LogDocMergePolicy();
            // Force degenerate merging so we can get a mix of
            // merging of segments with and without deletes at the
            // start:
            mp.MinMergeDocs = 1000;
            IndexWriter writer = new IndexWriter(directory, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(mp));

            Document doc = new Document();
            Field idField = NewStringField("id", "", Field.Store.YES);
            doc.Add(idField);
            for (int i = 0; i < 10; i++)
            {
                if (VERBOSE)
                {
                    Console.WriteLine("\nTEST: cycle");
                }
                for (int j = 0; j < 100; j++)
                {
                    idField.StringValue = Convert.ToString(i * 100 + j);
                    writer.AddDocument(doc);
                }

                int delID = i;
                while (delID < 100 * (1 + i))
                {
                    if (VERBOSE)
                    {
                        Console.WriteLine("TEST: del " + delID);
                    }
                    writer.DeleteDocuments(new Term("id", "" + delID));
                    delID += 10;
                }

                writer.Commit();
            }

            writer.Dispose();
            IndexReader reader = DirectoryReader.Open(directory);
            // Verify that we did not lose any deletes...
            Assert.AreEqual(450, reader.NumDocs);
            reader.Dispose();
            directory.Dispose();
        }
Example #14
        public virtual void TestPartialMerge()
        {
            Directory dir = NewDirectory();

            Document doc = new Document();

            doc.Add(NewStringField("content", "aaa", Field.Store.NO));
            int incrMin = TEST_NIGHTLY ? 15 : 40;

            for (int numDocs = 10; numDocs < 500; numDocs += TestUtil.NextInt32(Random, incrMin, 5 * incrMin))
            {
                LogDocMergePolicy ldmp = new LogDocMergePolicy();
                ldmp.MinMergeDocs = 1;
                ldmp.MergeFactor  = 5;
                IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)).SetOpenMode(OpenMode.CREATE).SetMaxBufferedDocs(2).SetMergePolicy(ldmp));
                for (int j = 0; j < numDocs; j++)
                {
                    writer.AddDocument(doc);
                }
                writer.Dispose();

                SegmentInfos sis = new SegmentInfos();
                sis.Read(dir);
                int segCount = sis.Count;

                ldmp             = new LogDocMergePolicy();
                ldmp.MergeFactor = 5;
                writer           = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)).SetMergePolicy(ldmp));
                writer.ForceMerge(3);
                writer.Dispose();

                sis = new SegmentInfos();
                sis.Read(dir);
                int optSegCount = sis.Count;

                if (segCount < 3)
                {
                    Assert.AreEqual(segCount, optSegCount);
                }
                else
                {
                    Assert.AreEqual(3, optSegCount);
                }
            }
            dir.Dispose();
        }
Example #15
        public virtual void TestMaxBufferedDocsChange()
        {
            Directory dir = NewDirectory();

            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(101).SetMergePolicy(new LogDocMergePolicy()).SetMergeScheduler(new SerialMergeScheduler()));

            // leftmost* segment has 1 doc
            // rightmost* segment has 100 docs
            for (int i = 1; i <= 100; i++)
            {
                for (int j = 0; j < i; j++)
                {
                    AddDoc(writer);
                    CheckInvariants(writer);
                }
                writer.Dispose();

                writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.APPEND).SetMaxBufferedDocs(101).SetMergePolicy(new LogDocMergePolicy()).SetMergeScheduler(new SerialMergeScheduler()));
            }

            writer.Dispose();
            LogDocMergePolicy ldmp = new LogDocMergePolicy();

            ldmp.MergeFactor = 10;
            writer           = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.APPEND).SetMaxBufferedDocs(10).SetMergePolicy(ldmp).SetMergeScheduler(new SerialMergeScheduler()));

            // merge policy only fixes segments on levels where merges
            // have been triggered, so check invariants after all adds
            for (int i = 0; i < 100; i++)
            {
                AddDoc(writer);
            }
            CheckInvariants(writer);

            for (int i = 100; i < 1000; i++)
            {
                AddDoc(writer);
            }
            writer.Commit();
            writer.WaitForMerges();
            writer.Commit();
            CheckInvariants(writer);

            writer.Dispose();
            dir.Dispose();
        }
Example #16
        public virtual void TestMergeDocCount0([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            Directory dir = NewDirectory();

            LogDocMergePolicy ldmp = new LogDocMergePolicy();

            ldmp.MergeFactor = 100;
            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10).SetMergePolicy(ldmp));

            for (int i = 0; i < 250; i++)
            {
                AddDoc(writer);
                CheckInvariants(writer);
            }
            writer.Dispose();

            // delete some docs without merging
            writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.NO_COMPOUND_FILES));
            writer.DeleteDocuments(new Term("content", "aaa"));
            writer.Dispose();

            ldmp             = new LogDocMergePolicy();
            ldmp.MergeFactor = 5;
            var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                         .SetOpenMode(OpenMode_e.APPEND)
                         .SetMaxBufferedDocs(10)
                         .SetMergePolicy(ldmp)
                         .SetMergeScheduler(scheduler);

            writer = new IndexWriter(dir, config);

            // merge factor is changed, so check invariants after all adds
            for (int i = 0; i < 10; i++)
            {
                AddDoc(writer);
            }
            writer.Commit();
            writer.WaitForMerges();
            writer.Commit();
            CheckInvariants(writer);
            Assert.AreEqual(10, writer.MaxDoc);

            writer.Dispose();
            dir.Dispose();
        }
Example #17
        public virtual void TestPartialMerge()
        {
            Directory dir = NewDirectory();

            Document doc = new Document();
            doc.Add(NewStringField("content", "aaa", Field.Store.NO));
            int incrMin = TEST_NIGHTLY ? 15 : 40;
            for (int numDocs = 10; numDocs < 500; numDocs += TestUtil.NextInt(Random(), incrMin, 5 * incrMin))
            {
                LogDocMergePolicy ldmp = new LogDocMergePolicy();
                ldmp.MinMergeDocs = 1;
                ldmp.MergeFactor = 5;
                IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.CREATE).SetMaxBufferedDocs(2).SetMergePolicy(ldmp));
                for (int j = 0; j < numDocs; j++)
                {
                    writer.AddDocument(doc);
                }
                writer.Dispose();

                SegmentInfos sis = new SegmentInfos();
                sis.Read(dir);
                int segCount = sis.Size();

                ldmp = new LogDocMergePolicy();
                ldmp.MergeFactor = 5;
                writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(ldmp));
                writer.ForceMerge(3);
                writer.Dispose();

                sis = new SegmentInfos();
                sis.Read(dir);
                int optSegCount = sis.Size();

                if (segCount < 3)
                {
                    Assert.AreEqual(segCount, optSegCount);
                }
                else
                {
                    Assert.AreEqual(3, optSegCount);
                }
            }
            dir.Dispose();
        }
Example #18
        public virtual void TestForceFlush()
        {
            Directory dir = NewDirectory();

            LogDocMergePolicy mp = new LogDocMergePolicy();
            mp.MinMergeDocs = 100;
            mp.MergeFactor = 10;
            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10).SetMergePolicy(mp));

            for (int i = 0; i < 100; i++)
            {
                AddDoc(writer);
                writer.Dispose();

                mp = new LogDocMergePolicy();
                mp.MergeFactor = 10;
                writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.APPEND).SetMaxBufferedDocs(10).SetMergePolicy(mp));
                mp.MinMergeDocs = 100;
                CheckInvariants(writer);
            }

            writer.Dispose();
            dir.Dispose();
        }
Example #19
		public virtual void TestOptimizeMaxNumSegments()
		{
			
			MockRAMDirectory dir = new MockRAMDirectory();
			
			Document doc = new Document();
			doc.Add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED));
			
			for (int numDocs = 38; numDocs < 500; numDocs += 38)
			{
				IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
				LogDocMergePolicy ldmp = new LogDocMergePolicy();
				ldmp.SetMinMergeDocs(1);
				writer.SetMergePolicy(ldmp);
				writer.SetMergeFactor(5);
				writer.SetMaxBufferedDocs(2);
				for (int j = 0; j < numDocs; j++)
					writer.AddDocument(doc);
				writer.Close();
				
				SegmentInfos sis = new SegmentInfos();
				sis.Read(dir);
				int segCount = sis.Count;
				
				writer = new IndexWriter(dir, new WhitespaceAnalyzer());
				writer.SetMergePolicy(ldmp);
				writer.SetMergeFactor(5);
				writer.Optimize(3);
				writer.Close();
				
				sis = new SegmentInfos();
				sis.Read(dir);
				int optSegCount = sis.Count;
				
				if (segCount < 3)
					Assert.AreEqual(segCount, optSegCount);
				else
					Assert.AreEqual(3, optSegCount);
			}
		}
Example #20
        public virtual void TestMaxNumSegments2()
        {
            Directory dir = NewDirectory();

            Document doc = new Document();
            doc.Add(NewStringField("content", "aaa", Field.Store.NO));

            LogDocMergePolicy ldmp = new LogDocMergePolicy();
            ldmp.MinMergeDocs = 1;
            ldmp.MergeFactor = 4;
            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetMergePolicy(ldmp).SetMergeScheduler(new ConcurrentMergeScheduler()));

            for (int iter = 0; iter < 10; iter++)
            {
                for (int i = 0; i < 19; i++)
                {
                    writer.AddDocument(doc);
                }

                writer.Commit();
                writer.WaitForMerges();
                writer.Commit();

                SegmentInfos sis = new SegmentInfos();
                sis.Read(dir);

                int segCount = sis.Size();
                writer.ForceMerge(7);
                writer.Commit();
                writer.WaitForMerges();

                sis = new SegmentInfos();
                sis.Read(dir);
                int optSegCount = sis.Size();

                if (segCount < 7)
                {
                    Assert.AreEqual(segCount, optSegCount);
                }
                else
                {
                    Assert.AreEqual(7, optSegCount, "seg: " + segCount);
                }
            }
            writer.Dispose();
            dir.Dispose();
        }
Example #21
        public virtual void TestMaxBufferedDocsChange()
        {
            Directory dir = NewDirectory();

            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(101).SetMergePolicy(new LogDocMergePolicy()).SetMergeScheduler(new SerialMergeScheduler()));

            // leftmost* segment has 1 doc
            // rightmost* segment has 100 docs
            for (int i = 1; i <= 100; i++)
            {
                for (int j = 0; j < i; j++)
                {
                    AddDoc(writer);
                    CheckInvariants(writer);
                }
                writer.Dispose();

                writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.APPEND).SetMaxBufferedDocs(101).SetMergePolicy(new LogDocMergePolicy()).SetMergeScheduler(new SerialMergeScheduler()));
            }

            writer.Dispose();
            LogDocMergePolicy ldmp = new LogDocMergePolicy();
            ldmp.MergeFactor = 10;
            writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.APPEND).SetMaxBufferedDocs(10).SetMergePolicy(ldmp).SetMergeScheduler(new SerialMergeScheduler()));

            // merge policy only fixes segments on levels where merges
            // have been triggered, so check invariants after all adds
            for (int i = 0; i < 100; i++)
            {
                AddDoc(writer);
            }
            CheckInvariants(writer);

            for (int i = 100; i < 1000; i++)
            {
                AddDoc(writer);
            }
            writer.Commit();
            writer.WaitForMerges();
            writer.Commit();
            CheckInvariants(writer);

            writer.Dispose();
            dir.Dispose();
        }
Example #22
        public virtual void TestMergeFactor()
        {
            Directory dir = new RAMDirectory();

            IndexWriterConfig conf = NewWriterConfig();
            IndexWriter writer = new IndexWriter(dir, conf);

            AddDocs(writer, 3);
            AddDocs(writer, 3);
            AddDocs(writer, 3);
            AddDocs(writer, 3);
            AddDocs(writer, 5);
            AddDocs(writer, 3);
            AddDocs(writer, 3);

            writer.Dispose();

            conf = NewWriterConfig();
            LogMergePolicy lmp = new LogDocMergePolicy();
            lmp.MaxMergeDocs = 3;
            lmp.MergeFactor = 2;
            conf.SetMergePolicy(lmp);

            writer = new IndexWriter(dir, conf);
            writer.ForceMerge(1);
            writer.Dispose();

            // Should be only 4 segments: the 5-doc segment is too large to merge, and
            // MergeFactor=2 merges the remaining 3-doc segments two at a time (3+3, 3+3, 5, 3+3).
            SegmentInfos sis = new SegmentInfos();
            sis.Read(dir);
            Assert.AreEqual(4, sis.Size());
        }
Example #23
        public virtual void TestSingleNonMergeableSegment()
        {
            Directory dir = new RAMDirectory();

            IndexWriterConfig conf = NewWriterConfig();
            IndexWriter writer = new IndexWriter(dir, conf);

            AddDocs(writer, 3, true);

            writer.Dispose();

            conf = NewWriterConfig();
            LogMergePolicy lmp = new LogDocMergePolicy();
            lmp.MaxMergeDocs = 3;
            conf.SetMergePolicy(lmp);

            writer = new IndexWriter(dir, conf);
            writer.ForceMerge(1);
            writer.Dispose();

            // Verify that the index still contains the single original segment.
            SegmentInfos sis = new SegmentInfos();
            sis.Read(dir);
            Assert.AreEqual(1, sis.Size());
        }
Example #24
		public virtual void TestDeleteMerging()
		{
			
			RAMDirectory directory = new MockRAMDirectory();
			
			IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true);
			ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
			writer.SetMergeScheduler(cms);
			
			LogDocMergePolicy mp = new LogDocMergePolicy(writer);
			writer.SetMergePolicy(mp);
			
			// Force degenerate merging so we can get a mix of
			// merging of segments with and without deletes at the
			// start:
			mp.SetMinMergeDocs(1000);
			
			Document doc = new Document();
			Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
			doc.Add(idField);
			for (int i = 0; i < 10; i++)
			{
				for (int j = 0; j < 100; j++)
				{
					idField.SetValue(System.Convert.ToString(i * 100 + j));
					writer.AddDocument(doc);
				}
				
				int delID = i;
				while (delID < 100 * (1 + i))
				{
					writer.DeleteDocuments(new Term("id", "" + delID));
					delID += 10;
				}
				
				writer.Flush();
			}
			
			writer.Close();
			IndexReader reader = IndexReader.Open(directory);
			// Verify that we did not lose any deletes...
			Assert.AreEqual(450, reader.NumDocs());
			reader.Close();
			directory.Close();
		}
Example #25
 public virtual void TestForceFlush()
 {
     Directory dir = new RAMDirectory();
     
     IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.SetMaxBufferedDocs(10);
     writer.MergeFactor = 10;
     LogDocMergePolicy mp = new LogDocMergePolicy(writer);
     mp.MinMergeDocs = 100;
     writer.SetMergePolicy(mp);
     
     for (int i = 0; i < 100; i++)
     {
         AddDoc(writer);
         writer.Close();
         
         writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
         writer.SetMaxBufferedDocs(10);
         writer.SetMergePolicy(mp);
         mp.MinMergeDocs = 100;
         writer.MergeFactor = 10;
         CheckInvariants(writer);
     }
     
     writer.Close();
 }
Example #26
        public virtual void TestSingleMergeableTooLargeSegment()
        {
            Directory dir = new RAMDirectory();

            IndexWriterConfig conf = NewWriterConfig();
            IndexWriter writer = new IndexWriter(dir, conf);

            AddDocs(writer, 5, true);

            // delete the last document

            writer.DeleteDocuments(new Term("id", "4"));
            writer.Dispose();

            conf = NewWriterConfig();
            LogMergePolicy lmp = new LogDocMergePolicy();
            lmp.MaxMergeDocs = 2;
            conf.SetMergePolicy(lmp);

            writer = new IndexWriter(dir, conf);
            writer.ForceMerge(1);
            writer.Dispose();

            // Verify that the segment was not merged: it is still the only segment
            // and still carries its deletion.
            SegmentInfos sis = new SegmentInfos();
            sis.Read(dir);
            Assert.AreEqual(1, sis.Size());
            Assert.IsTrue(sis.Info(0).HasDeletions());
        }
Example #27
        public virtual void TestNumDocsLimit()
        {
            // tests that the max merge docs constraint is applied during forceMerge.
            Directory dir = new RAMDirectory();

            // Prepare an index w/ several small segments and a large one.
            IndexWriterConfig conf = NewWriterConfig();
            IndexWriter writer = new IndexWriter(dir, conf);

            AddDocs(writer, 3);
            AddDocs(writer, 3);
            AddDocs(writer, 5);
            AddDocs(writer, 3);
            AddDocs(writer, 3);
            AddDocs(writer, 3);
            AddDocs(writer, 3);

            writer.Dispose();

            conf = NewWriterConfig();
            LogMergePolicy lmp = new LogDocMergePolicy();
            lmp.MaxMergeDocs = 3;
            conf.SetMergePolicy(lmp);

            writer = new IndexWriter(dir, conf);
            writer.ForceMerge(1);
            writer.Dispose();

            // Should be only 3 segments in the index: the 5-doc segment exceeds MaxMergeDocs,
            // so the 3-doc segments on either side of it merge into one segment each.
            SegmentInfos sis = new SegmentInfos();
            sis.Read(dir);
            Assert.AreEqual(3, sis.Size());
        }
Example #28
		public virtual void TestOptimizeMaxNumSegments2()
		{
			MockRAMDirectory dir = new MockRAMDirectory();
			
			Document doc = new Document();
			doc.Add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED));
			
			IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
			LogDocMergePolicy ldmp = new LogDocMergePolicy();
			ldmp.SetMinMergeDocs(1);
			writer.SetMergePolicy(ldmp);
			writer.SetMergeFactor(4);
			writer.SetMaxBufferedDocs(2);
			
			for (int iter = 0; iter < 10; iter++)
			{
				
				for (int i = 0; i < 19; i++)
					writer.AddDocument(doc);
				
				writer.Flush();
				
				SegmentInfos sis = new SegmentInfos();
				((ConcurrentMergeScheduler) writer.GetMergeScheduler()).Sync();
				sis.Read(dir);
				
				int segCount = sis.Count;
				
				writer.Optimize(7);
				
				sis = new SegmentInfos();
				((ConcurrentMergeScheduler) writer.GetMergeScheduler()).Sync();
				sis.Read(dir);
				int optSegCount = sis.Count;
				
				if (segCount < 7)
					Assert.AreEqual(segCount, optSegCount);
				else
					Assert.AreEqual(7, optSegCount);
			}
		}
Example #29
        public virtual void TestMergeDocCount0([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            Directory dir = NewDirectory();

            LogDocMergePolicy ldmp = new LogDocMergePolicy();
            ldmp.MergeFactor = 100;
            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10).SetMergePolicy(ldmp));

            for (int i = 0; i < 250; i++)
            {
                AddDoc(writer);
                CheckInvariants(writer);
            }
            writer.Dispose();

            // delete some docs without merging
            writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.NO_COMPOUND_FILES));
            writer.DeleteDocuments(new Term("content", "aaa"));
            writer.Dispose();

            ldmp = new LogDocMergePolicy();
            ldmp.MergeFactor = 5;
            var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                .SetOpenMode(OpenMode_e.APPEND)
                .SetMaxBufferedDocs(10)
                .SetMergePolicy(ldmp)
                .SetMergeScheduler(scheduler);
            writer = new IndexWriter(dir, config);

            // merge factor is changed, so check invariants after all adds
            for (int i = 0; i < 10; i++)
            {
                AddDoc(writer);
            }
            writer.Commit();
            writer.WaitForMerges();
            writer.Commit();
            CheckInvariants(writer);
            Assert.AreEqual(10, writer.MaxDoc);

            writer.Dispose();
            dir.Dispose();
        }