public virtual void TestReferenceDecrementIllegally([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            Directory dir    = NewDirectory();
            var       config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                               .SetMergeScheduler(scheduler);
            IndexWriter     writer = new IndexWriter(dir, config);
            SearcherManager sm     = new SearcherManager(writer, false, new SearcherFactory());

            writer.AddDocument(new Document());
            writer.Commit();
            sm.MaybeRefreshBlocking();

            IndexSearcher acquire  = sm.Acquire();
            IndexSearcher acquire2 = sm.Acquire();

            sm.Release(acquire);
            sm.Release(acquire2);

            acquire = sm.Acquire();
            acquire.IndexReader.DecRef();
            sm.Release(acquire);

            Assert.Throws<InvalidOperationException>(() => sm.Acquire(), "acquire should have thrown an InvalidOperationException since we modified the refCount outside of the manager");

            // sm.Dispose(); -- already closed
            writer.Dispose();
            dir.Dispose();
        }
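        // Every test in this listing draws its scheduler parameter from the
        // ConcurrentMergeSchedulers value source named in the [ValueSource] attribute,
        // but the source itself never appears here. A minimal sketch, assuming it just
        // enumerates the IConcurrentMergeScheduler implementations under test (only the
        // type and member names come from the attribute; the body is a guess):
        public static class ConcurrentMergeSchedulers
        {
            public static IEnumerable<IConcurrentMergeScheduler> Values
            {
                get
                {
                    // ConcurrentMergeScheduler is the standard implementation; a second
                    // scheduler (e.g. TaskMergeScheduler) is assumed here, not confirmed.
                    yield return new ConcurrentMergeScheduler();
                    yield return new TaskMergeScheduler();
                }
            }
        }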
Example #2
        public virtual void TestCrashWhileIndexing(
            [ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            // this test relies on being able to open a reader before any commit
            // has happened, so we must create an initial commit just to allow that,
            // before any documents are added.
            IndexWriter          writer = InitIndex(scheduler, Random(), true);
            MockDirectoryWrapper dir    = (MockDirectoryWrapper)writer.Directory;

            // We create leftover files because merging could be
            // running when we crash:
            dir.AssertNoUnrefencedFilesOnClose = false;

            Crash(writer);

            IndexReader reader = DirectoryReader.Open(dir);

            Assert.IsTrue(reader.NumDocs < 157);
            reader.Dispose();

            // Make a new dir, copying from the crashed dir, and
            // open IW on it, to confirm IW "recovers" after a
            // crash:
            Directory dir2 = NewDirectory(dir);

            dir.Dispose();

            (new RandomIndexWriter(Random(), dir2, Similarity, TimeZone)).Dispose();
            dir2.Dispose();
        }
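        // The Crash helper used above is not shown in this listing. A plausible sketch,
        // assuming it syncs in-flight merges and then simulates power loss on the
        // MockDirectoryWrapper (Sync/ClearCrash are assumptions; dir.Crash() itself
        // appears verbatim in a later example):
        private void Crash(IndexWriter writer)
        {
            MockDirectoryWrapper dir = (MockDirectoryWrapper)writer.Directory;
            IConcurrentMergeScheduler scheduler = writer.Config.MergeScheduler as IConcurrentMergeScheduler;
            if (scheduler != null)
            {
                scheduler.Sync(); // let any running merges finish writing first
            }
            dir.Crash();      // drop unsynced files, as if the machine lost power
            dir.ClearCrash(); // allow the directory to be reopened afterwards
        }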
Example #3
        private IndexWriter InitIndex(IConcurrentMergeScheduler scheduler, Random random, MockDirectoryWrapper dir, bool initialCommit)
        {
            dir.LockFactory = NoLockFactory.DoNoLockFactory;

            scheduler.SetSuppressExceptions();

            IndexWriter writer = new IndexWriter(dir,
                NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
                .SetMaxBufferedDocs(10)
                .SetMergeScheduler(scheduler));

            if (initialCommit)
            {
                writer.Commit();
            }

            Document doc = new Document();
            doc.Add(NewTextField("content", "aaa", Field.Store.NO));
            doc.Add(NewTextField("id", "0", Field.Store.NO));
            for (int i = 0; i < 157; i++)
            {
                writer.AddDocument(doc);
            }

            return writer;
        }
Example #4
        private IndexWriter InitIndex(Func<IConcurrentMergeScheduler> newScheduler, Random random, MockDirectoryWrapper dir, bool initialCommit)
        {
            dir.SetLockFactory(NoLockFactory.GetNoLockFactory());

            IndexWriter writer = new IndexWriter(dir,
                                                 NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
                                                 .SetMaxBufferedDocs(10)
                                                 .SetMergeScheduler(newScheduler()));

            IConcurrentMergeScheduler scheduler = writer.Config.MergeScheduler as IConcurrentMergeScheduler;

            if (scheduler != null)
            {
                scheduler.SetSuppressExceptions();
            }

            if (initialCommit)
            {
                writer.Commit();
            }

            Document doc = new Document();

            doc.Add(NewTextField("content", "aaa", Field.Store.NO));
            doc.Add(NewTextField("id", "0", Field.Store.NO));
            for (int i = 0; i < 157; i++)
            {
                writer.AddDocument(doc);
            }

            return writer;
        }
Example #5
        // Runs test, with one thread, using the specific failure
        // to trigger an IOException
        public virtual void TestSingleThreadFailure(IConcurrentMergeScheduler scheduler, MockDirectoryWrapper.Failure failure)
        {
            MockDirectoryWrapper dir = NewMockDirectory();

            IndexWriter writer     = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetMergeScheduler(scheduler));
            Document    doc        = new Document();
            FieldType   customType = new FieldType(TextField.TYPE_STORED);

            customType.StoreTermVectors         = true;
            customType.StoreTermVectorPositions = true;
            customType.StoreTermVectorOffsets   = true;
            doc.Add(NewField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType));

            for (int i = 0; i < 6; i++)
            {
                writer.AddDocument(doc);
            }

            dir.FailOn(failure);
            failure.SetDoFail();
            try
            {
                writer.AddDocument(doc);
                writer.AddDocument(doc);
                writer.Commit();
                Assert.Fail("did not hit exception");
            }
            catch (IOException)
            {
            }
            failure.ClearDoFail();
            writer.AddDocument(doc);
            writer.Dispose(false);
            dir.Dispose();
        }
Example #6
        public virtual void TestTransactions_Mem(
            [ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler1,
            [ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler2)
        {
            Console.WriteLine("Start test");
            // we can't use a non-RAM directory on Windows, because this test needs to double-write.
            MockDirectoryWrapper dir1 = new MockDirectoryWrapper(Random(), new RAMDirectory());
            MockDirectoryWrapper dir2 = new MockDirectoryWrapper(Random(), new RAMDirectory());

            dir1.PreventDoubleWrite = false;
            dir2.PreventDoubleWrite = false;
            dir1.FailOn(new RandomFailure(this));
            dir2.FailOn(new RandomFailure(this));
            dir1.FailOnOpenInput = false;
            dir2.FailOnOpenInput = false;

            // We throw exceptions in deleteFile, which creates
            // leftover files:
            dir1.AssertNoUnrefencedFilesOnClose = false;
            dir2.AssertNoUnrefencedFilesOnClose = false;

            InitIndex(dir1);
            InitIndex(dir2);

            TimedThread[] threads   = new TimedThread[3];
            int           numThread = 0;

            IndexerThread indexerThread = new IndexerThread(this, this, dir1, dir2, scheduler1, scheduler2, threads);

            threads[numThread++] = indexerThread;
            indexerThread.Start();

            SearcherThread searcherThread1 = new SearcherThread(this, dir1, dir2, threads);

            threads[numThread++] = searcherThread1;
            searcherThread1.Start();

            SearcherThread searcherThread2 = new SearcherThread(this, dir1, dir2, threads);

            threads[numThread++] = searcherThread2;
            searcherThread2.Start();

            for (int i = 0; i < numThread; i++)
            {
                threads[i].Join();
            }

            for (int i = 0; i < numThread; i++)
            {
                Assert.IsFalse(threads[i].Failed);
            }
            dir1.Dispose();
            dir2.Dispose();

            Console.WriteLine("End test");
        }
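Example #7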
        public virtual void TestMaxNumSegments2([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            Directory dir = NewDirectory();

            Document doc = new Document();

            doc.Add(NewStringField("content", "aaa", Field.Store.NO));

            LogDocMergePolicy ldmp = new LogDocMergePolicy();

            ldmp.MinMergeDocs = 1;
            ldmp.MergeFactor  = 4;
            var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                         .SetMaxBufferedDocs(2)
                         .SetMergePolicy(ldmp)
                         .SetMergeScheduler(scheduler);
            IndexWriter writer = new IndexWriter(dir, config);

            for (int iter = 0; iter < 10; iter++)
            {
                for (int i = 0; i < 19; i++)
                {
                    writer.AddDocument(doc);
                }

                writer.Commit();
                writer.WaitForMerges();
                writer.Commit();

                SegmentInfos sis = new SegmentInfos();
                sis.Read(dir);

                int segCount = sis.Size();
                writer.ForceMerge(7);
                writer.Commit();
                writer.WaitForMerges();

                sis = new SegmentInfos();
                sis.Read(dir);
                int optSegCount = sis.Size();

                if (segCount < 7)
                {
                    Assert.AreEqual(segCount, optSegCount);
                }
                else
                {
                    Assert.AreEqual(7, optSegCount, "seg: " + segCount);
                }
            }
            writer.Dispose();
            dir.Dispose();
        }
Example #8
 public IndexerThread(TestTransactions outerInstance, object @lock,
                      Directory dir1, Directory dir2,
                      IConcurrentMergeScheduler scheduler1, IConcurrentMergeScheduler scheduler2,
                      TimedThread[] threads)
     : base(threads)
 {
     _scheduler1        = scheduler1;
     _scheduler2        = scheduler2;
     this.OuterInstance = outerInstance;
     this.@lock         = @lock;
     this.Dir1          = dir1;
     this.Dir2          = dir2;
 }
Example #9
        public virtual void TestStressIndexAndSearching([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            Directory            directory = NewDirectory();
            MockDirectoryWrapper wrapper   = directory as MockDirectoryWrapper;

            if (wrapper != null)
            {
                wrapper.AssertNoUnrefencedFilesOnClose = true;
            }

            RunStressTest(directory, scheduler);
            directory.Dispose();
        }
Example #10
        public virtual void TestNumerics([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            BaseDirectoryWrapper dir = NewFSDirectory(CreateTempDir("2BNumerics"));

            if (dir is MockDirectoryWrapper)
            {
                ((MockDirectoryWrapper)dir).Throttling = MockDirectoryWrapper.Throttling_e.NEVER;
            }

            IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                                            .SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH).SetRAMBufferSizeMB(256.0).SetMergeScheduler(scheduler).SetMergePolicy(NewLogMergePolicy(false, 10)).SetOpenMode(IndexWriterConfig.OpenMode_e.CREATE));

            Document doc = new Document();
            NumericDocValuesField dvField = new NumericDocValuesField("dv", 0);

            doc.Add(dvField);

            for (int i = 0; i < int.MaxValue; i++)
            {
                dvField.LongValue = i;
                w.AddDocument(doc);
                if (i % 100000 == 0)
                {
                    Console.WriteLine("indexed: " + i);
                    Console.Out.Flush();
                }
            }

            w.ForceMerge(1);
            w.Dispose();

            Console.WriteLine("verifying...");
            Console.Out.Flush();

            DirectoryReader r             = DirectoryReader.Open(dir);
            long            expectedValue = 0;

            foreach (AtomicReaderContext context in r.Leaves)
            {
                AtomicReader     reader = context.AtomicReader;
                NumericDocValues dv     = reader.GetNumericDocValues("dv");
                for (int i = 0; i < reader.MaxDoc; i++)
                {
                    Assert.AreEqual(expectedValue, dv.Get(i));
                    expectedValue++;
                }
            }

            r.Dispose();
            dir.Dispose();
        }
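Example #11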
        /*
         * Run one indexer and 2 searchers against single index as
         * stress test.
         */

        public virtual void RunStressTest(Directory directory, IConcurrentMergeScheduler mergeScheduler)
        {
            IndexWriter modifier = new IndexWriter(directory, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)).SetOpenMode(OpenMode.CREATE).SetMaxBufferedDocs(10).SetMergeScheduler(mergeScheduler));

            modifier.Commit();

            TimedThread[] threads   = new TimedThread[4];
            int           numThread = 0;

            // One modifier that writes 10 docs then removes 5, over
            // and over:
            IndexerThread indexerThread = new IndexerThread(modifier, threads, NewStringField, NewTextField);

            threads[numThread++] = indexerThread;
            indexerThread.Start();

            IndexerThread indexerThread2 = new IndexerThread(modifier, threads, NewStringField, NewTextField);

            threads[numThread++] = indexerThread2;
            indexerThread2.Start();

            // Two searchers that constantly just re-instantiate the
            // searcher:
            SearcherThread searcherThread1 = new SearcherThread(directory, threads, this);

            threads[numThread++] = searcherThread1;
            searcherThread1.Start();

            SearcherThread searcherThread2 = new SearcherThread(directory, threads, this);

            threads[numThread++] = searcherThread2;
            searcherThread2.Start();

            for (int i = 0; i < numThread; i++)
            {
                threads[i].Join();
            }

            modifier.Dispose();

            for (int i = 0; i < numThread; i++)
            {
                Assert.IsFalse(threads[i].failed);
            }

            //System.out.println("    Writer: " + indexerThread.count + " iterations");
            //System.out.println("Searcher 1: " + searcherThread1.count + " searchers created");
            //System.out.println("Searcher 2: " + searcherThread2.count + " searchers created");
        }
Example #12
        public virtual void Test([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            BaseDirectoryWrapper dir = NewFSDirectory(CreateTempDir("2BPostings"));

            if (dir is MockDirectoryWrapper)
            {
                ((MockDirectoryWrapper)dir).Throttling = MockDirectoryWrapper.Throttling_e.NEVER;
            }

            var config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                         .SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
                         .SetRAMBufferSizeMB(256.0)
                         .SetMergeScheduler(scheduler)
                         .SetMergePolicy(NewLogMergePolicy(false, 10))
                         .SetOpenMode(IndexWriterConfig.OpenMode_e.CREATE);

            IndexWriter w = new IndexWriter(dir, config);

            MergePolicy mp = w.Config.MergePolicy;

            if (mp is LogByteSizeMergePolicy)
            {
                // 1 petabyte:
                ((LogByteSizeMergePolicy)mp).MaxMergeMB = 1024 * 1024 * 1024;
            }

            Document  doc = new Document();
            FieldType ft  = new FieldType(TextField.TYPE_NOT_STORED);

            ft.OmitNorms    = true;
            ft.IndexOptions = FieldInfo.IndexOptions.DOCS_ONLY;
            Field field = new Field("field", new MyTokenStream(), ft);

            doc.Add(field);

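            // MyTokenStream is not shown in this listing; the division by 26 suggests it
            // emits 26 tokens per document, so this doc count pushes the total number of
            // postings just past int.MaxValue: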
            int numDocs = (int.MaxValue / 26) + 1;

            for (int i = 0; i < numDocs; i++)
            {
                w.AddDocument(doc);
                if (VERBOSE && i % 100000 == 0)
                {
                    Console.WriteLine(i + " of " + numDocs + "...");
                }
            }
            w.ForceMerge(1);
            w.Dispose();
            dir.Dispose();
        }
Example #13
        public virtual void TestImmediateDiskFullWithThreads([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            int NUM_THREADS   = 3;
            int numIterations = TEST_NIGHTLY ? 10 : 3;

            for (int iter = 0; iter < numIterations; iter++)
            {
                if (VERBOSE)
                {
                    Console.WriteLine("\nTEST: iter=" + iter);
                }
                MockDirectoryWrapper dir = NewMockDirectory();
                var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                             .SetMaxBufferedDocs(2)
                             .SetMergeScheduler(scheduler)
                             .SetMergePolicy(NewLogMergePolicy(4));
                IndexWriter writer = new IndexWriter(dir, config);
                scheduler.SetSuppressExceptions();
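                // Simulate a nearly-full disk, slightly larger on each iteration: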
                dir.MaxSizeInBytes = 4 * 1024 + 20 * iter;

                IndexerThread[] threads = new IndexerThread[NUM_THREADS];

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    threads[i] = new IndexerThread(writer, true, NewField);
                }

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    threads[i].Start();
                }

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    // Without fix for LUCENE-1130: one of the
                    // threads will hang
                    threads[i].Join();
                    Assert.IsTrue(threads[i].Error == null, "hit unexpected Throwable");
                }

                // Make sure once disk space is avail again, we can
                // cleanly close:
                dir.MaxSizeInBytes = 0;
                writer.Dispose(false);
                dir.Dispose();
            }
        }
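Example #14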
        public virtual void TestImmediateDiskFull([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            MockDirectoryWrapper dir = NewMockDirectory();
            var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                         .SetMaxBufferedDocs(2)
                         .SetMergeScheduler(scheduler);
            IndexWriter writer = new IndexWriter(dir, config);

            dir.MaxSizeInBytes = Math.Max(1, dir.RecomputedActualSizeInBytes);
            Document  doc        = new Document();
            FieldType customType = new FieldType(TextField.TYPE_STORED);

            doc.Add(NewField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType));
            try
            {
                writer.AddDocument(doc);
                Assert.Fail("did not hit disk full");
            }
            catch (IOException)
            {
            }
            // Without fix for LUCENE-1130: this call will hang:
            try
            {
                writer.AddDocument(doc);
                Assert.Fail("did not hit disk full");
            }
            catch (IOException)
            {
            }
            try
            {
                writer.Dispose(false);
                Assert.Fail("did not hit disk full");
            }
            catch (IOException)
            {
            }

            // Make sure once disk space is avail again, we can
            // cleanly close:
            dir.MaxSizeInBytes = 0;
            writer.Dispose(false);
            dir.Dispose();
        }
Example #15
        public virtual void TestMergeDocCount0([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            Directory dir = NewDirectory();

            LogDocMergePolicy ldmp = new LogDocMergePolicy();

            ldmp.MergeFactor = 100;
            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10).SetMergePolicy(ldmp));

            for (int i = 0; i < 250; i++)
            {
                AddDoc(writer);
                CheckInvariants(writer);
            }
            writer.Dispose();

            // delete some docs without merging
            writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.NO_COMPOUND_FILES));
            writer.DeleteDocuments(new Term("content", "aaa"));
            writer.Dispose();

            ldmp             = new LogDocMergePolicy();
            ldmp.MergeFactor = 5;
            var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                         .SetOpenMode(OpenMode_e.APPEND)
                         .SetMaxBufferedDocs(10)
                         .SetMergePolicy(ldmp)
                         .SetMergeScheduler(scheduler);

            writer = new IndexWriter(dir, config);

            // merge factor is changed, so check invariants after all adds
            for (int i = 0; i < 10; i++)
            {
                AddDoc(writer);
            }
            writer.Commit();
            writer.WaitForMerges();
            writer.Commit();
            CheckInvariants(writer);
            Assert.AreEqual(10, writer.MaxDoc);

            writer.Dispose();
            dir.Dispose();
        }
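        // The AddDoc and CheckInvariants helpers above are omitted from this listing.
        // A sketch of AddDoc, assuming it follows the usual pattern in these merge-policy
        // tests (the "content"/"aaa" field matches the DeleteDocuments call above);
        // CheckInvariants would verify segment counts against the merge policy:
        private void AddDoc(IndexWriter writer)
        {
            Document doc = new Document();
            doc.Add(NewTextField("content", "aaa", Field.Store.NO));
            writer.AddDocument(doc);
        }
Example #16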
        /*
          Run one indexer and 2 searchers against single index as
          stress test.
        */
        public virtual void RunStressTest(Directory directory, IConcurrentMergeScheduler mergeScheduler)
        {
            IndexWriter modifier = new IndexWriter(directory, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.CREATE).SetMaxBufferedDocs(10).SetMergeScheduler(mergeScheduler));
            modifier.Commit();

            TimedThread[] threads = new TimedThread[4];
            int numThread = 0;

            // One modifier that writes 10 docs then removes 5, over
            // and over:
            IndexerThread indexerThread = new IndexerThread(modifier, threads);
            threads[numThread++] = indexerThread;
            indexerThread.Start();

            IndexerThread indexerThread2 = new IndexerThread(modifier, threads);
            threads[numThread++] = indexerThread2;
            indexerThread2.Start();

            // Two searchers that constantly just re-instantiate the
            // searcher:
            SearcherThread searcherThread1 = new SearcherThread(directory, threads);
            threads[numThread++] = searcherThread1;
            searcherThread1.Start();

            SearcherThread searcherThread2 = new SearcherThread(directory, threads);
            threads[numThread++] = searcherThread2;
            searcherThread2.Start();

            for (int i = 0; i < numThread; i++)
            {
                threads[i].Join();
            }

            modifier.Dispose();

            for (int i = 0; i < numThread; i++)
            {
                Assert.IsFalse(threads[i].Failed);
            }

            //System.out.println("    Writer: " + indexerThread.count + " iterations");
            //System.out.println("Searcher 1: " + searcherThread1.count + " searchers created");
            //System.out.println("Searcher 2: " + searcherThread2.count + " searchers created");
        }
Example #17
        public virtual void TestCrashAfterReopen(
            [ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            IndexWriter          writer = InitIndex(scheduler, Random(), false);
            MockDirectoryWrapper dir    = (MockDirectoryWrapper)writer.Directory;

            // We create leftover files because merging could be
            // running when we crash:
            dir.AssertNoUnrefencedFilesOnClose = false;

            writer.Dispose();
            writer = InitIndex(scheduler, Random(), dir, false);
            Assert.AreEqual(314, writer.MaxDoc);
            Crash(writer);

            /*
             * System.out.println("\n\nTEST: open reader");
             * String[] l = dir.list();
             * Arrays.sort(l);
             * for(int i=0;i<l.Length;i++)
             * System.out.println("file " + i + " = " + l[i] + " " +
             * dir.FileLength(l[i]) + " bytes");
             */

            IndexReader reader = DirectoryReader.Open(dir);

            Assert.IsTrue(reader.NumDocs >= 157);
            reader.Dispose();

            // Make a new dir, copying from the crashed dir, and
            // open IW on it, to confirm IW "recovers" after a
            // crash:
            Directory dir2 = NewDirectory(dir);

            dir.Dispose();

            (new RandomIndexWriter(Random(), dir2, Similarity, TimeZone)).Dispose();
            dir2.Dispose();
        }
Example #18
        public virtual void TestCrashAfterCloseNoWait(
            [ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            IndexWriter          writer = InitIndex(scheduler, Random(), false);
            MockDirectoryWrapper dir    = (MockDirectoryWrapper)writer.Directory;

            writer.Dispose(false);

            dir.Crash();

            /*
             * String[] l = dir.list();
             * Arrays.sort(l);
             * for(int i=0;i<l.Length;i++)
             * System.out.println("file " + i + " = " + l[i] + " " + dir.FileLength(l[i]) + " bytes");
             */
            IndexReader reader = DirectoryReader.Open(dir);

            Assert.AreEqual(157, reader.NumDocs);
            reader.Dispose();
            dir.Dispose();
        }
Example #19
        public virtual void Test([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            BaseDirectoryWrapper dir = NewFSDirectory(CreateTempDir("2BPostingsBytes1"));

            if (dir is MockDirectoryWrapper)
            {
                ((MockDirectoryWrapper)dir).Throttling = MockDirectoryWrapper.Throttling_e.NEVER;
            }

            var config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                         .SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
                         .SetRAMBufferSizeMB(256.0)
                         .SetMergeScheduler(scheduler)
                         .SetMergePolicy(NewLogMergePolicy(false, 10))
                         .SetOpenMode(IndexWriterConfig.OpenMode_e.CREATE);
            IndexWriter w = new IndexWriter(dir, config);

            MergePolicy mp = w.Config.MergePolicy;

            if (mp is LogByteSizeMergePolicy)
            {
                // 1 petabyte:
                ((LogByteSizeMergePolicy)mp).MaxMergeMB = 1024 * 1024 * 1024;
            }

            Document  doc = new Document();
            FieldType ft  = new FieldType(TextField.TYPE_NOT_STORED);

            ft.IndexOptions = FieldInfo.IndexOptions.DOCS_AND_FREQS;
            ft.OmitNorms    = true;
            MyTokenStream tokenStream = new MyTokenStream();
            Field         field       = new Field("field", tokenStream, ft);

            doc.Add(field);

            const int numDocs = 1000;

            for (int i = 0; i < numDocs; i++)
            {
                if (i % 2 == 1) // trick blockPF's little optimization
                {
                    tokenStream.n = 65536;
                }
                else
                {
                    tokenStream.n = 65537;
                }
                w.AddDocument(doc);
            }
            w.ForceMerge(1);
            w.Dispose();

            DirectoryReader oneThousand = DirectoryReader.Open(dir);

            IndexReader[] subReaders = new IndexReader[1000];
            Arrays.Fill(subReaders, oneThousand);
            MultiReader          mr   = new MultiReader(subReaders);
            BaseDirectoryWrapper dir2 = NewFSDirectory(CreateTempDir("2BPostingsBytes2"));

            if (dir2 is MockDirectoryWrapper)
            {
                ((MockDirectoryWrapper)dir2).Throttling = MockDirectoryWrapper.Throttling_e.NEVER;
            }
            IndexWriter w2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, null));

            w2.AddIndexes(mr);
            w2.ForceMerge(1);
            w2.Dispose();
            oneThousand.Dispose();

            DirectoryReader oneMillion = DirectoryReader.Open(dir2);

            subReaders = new IndexReader[2000];
            Arrays.Fill(subReaders, oneMillion);
            mr = new MultiReader(subReaders);
            BaseDirectoryWrapper dir3 = NewFSDirectory(CreateTempDir("2BPostingsBytes3"));

            if (dir3 is MockDirectoryWrapper)
            {
                ((MockDirectoryWrapper)dir3).Throttling = MockDirectoryWrapper.Throttling_e.NEVER;
            }
            IndexWriter w3 = new IndexWriter(dir3, new IndexWriterConfig(TEST_VERSION_CURRENT, null));

            w3.AddIndexes(mr);
            w3.ForceMerge(1);
            w3.Dispose();
            oneMillion.Dispose();

            dir.Dispose();
            dir2.Dispose();
            dir3.Dispose();
        }
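Example #20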
        public virtual void TestFixedSorted([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            BaseDirectoryWrapper dir = NewFSDirectory(CreateTempDir("2BFixedSorted"));

            if (dir is MockDirectoryWrapper)
            {
                ((MockDirectoryWrapper)dir).Throttling = MockDirectoryWrapper.Throttling_e.NEVER;
            }

            IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                                            .SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
                                            .SetRAMBufferSizeMB(256.0)
                                            .SetMergeScheduler(scheduler)
                                            .SetMergePolicy(NewLogMergePolicy(false, 10))
                                            .SetOpenMode(IndexWriterConfig.OpenMode_e.CREATE));

            Document             doc     = new Document();
            var                  bytes   = new byte[2];
            BytesRef             data    = new BytesRef(bytes);
            SortedDocValuesField dvField = new SortedDocValuesField("dv", data);

            doc.Add(dvField);

            for (int i = 0; i < int.MaxValue; i++)
            {
                bytes[0] = (byte)(i >> 8);
                bytes[1] = (byte)i;
                w.AddDocument(doc);
                if (i % 100000 == 0)
                {
                    Console.WriteLine("indexed: " + i);
                    Console.Out.Flush();
                }
            }

            w.ForceMerge(1);
            w.Dispose();

            Console.WriteLine("verifying...");
            Console.Out.Flush();

            DirectoryReader r             = DirectoryReader.Open(dir);
            int             expectedValue = 0;

            foreach (AtomicReaderContext context in r.Leaves)
            {
                AtomicReader    reader  = context.AtomicReader;
                BytesRef        scratch = new BytesRef();
                BinaryDocValues dv      = reader.GetSortedDocValues("dv");
                for (int i = 0; i < reader.MaxDoc; i++)
                {
                    bytes[0] = (byte)(expectedValue >> 8);
                    bytes[1] = (byte)expectedValue;
                    dv.Get(i, scratch);
                    Assert.AreEqual(data, scratch);
                    expectedValue++;
                }
            }

            r.Dispose();
            dir.Dispose();
        }
Example #21
        public virtual void Test([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new MMapDirectory(CreateTempDir("4GBStoredFields")));

            dir.Throttling = MockDirectoryWrapper.Throttling_e.NEVER;

            var config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                         .SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
                         .SetRAMBufferSizeMB(256.0)
                         .SetMergeScheduler(scheduler)
                         .SetMergePolicy(NewLogMergePolicy(false, 10))
                         .SetOpenMode(IndexWriterConfig.OpenMode_e.CREATE);
            IndexWriter w = new IndexWriter(dir, config);

            MergePolicy mp = w.Config.MergePolicy;

            if (mp is LogByteSizeMergePolicy)
            {
                // 1 petabyte:
                ((LogByteSizeMergePolicy)mp).MaxMergeMB = 1024 * 1024 * 1024;
            }

            Document  doc = new Document();
            FieldType ft  = new FieldType();

            ft.Indexed = false;
            ft.Stored  = true;
            ft.Freeze();
            int valueLength = RandomInts.NextIntBetween(Random(), 1 << 13, 1 << 20);
            var value       = new byte[valueLength];

            for (int i = 0; i < valueLength; ++i)
            {
                // random so that even compressing codecs can't compress it
                value[i] = (byte)Random().Next(256);
            }
            Field f = new Field("fld", value, ft);

            doc.Add(f);

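            // Enough documents that the stored fields (.fdt) file must exceed 4 GB
            // (1L << 32 bytes), which is what the verification below checks for: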
            int numDocs = (int)((1L << 32) / valueLength + 100);

            for (int i = 0; i < numDocs; ++i)
            {
                w.AddDocument(doc);
                if (VERBOSE && i % (numDocs / 10) == 0)
                {
                    Console.WriteLine(i + " of " + numDocs + "...");
                }
            }
            w.ForceMerge(1);
            w.Dispose();
            if (VERBOSE)
            {
                bool found = false;
                foreach (string file in dir.ListAll())
                {
                    if (file.EndsWith(".fdt"))
                    {
                        long fileLength = dir.FileLength(file);
                        if (fileLength >= 1L << 32)
                        {
                            found = true;
                        }
                        Console.WriteLine("File length of " + file + " : " + fileLength);
                    }
                }
                if (!found)
                {
                    Console.WriteLine("No .fdt file larger than 4GB, test bug?");
                }
            }

            DirectoryReader rd = DirectoryReader.Open(dir);
            Document        sd = rd.Document(numDocs - 1);

            Assert.IsNotNull(sd);
            Assert.AreEqual(1, sd.Fields.Count);
            BytesRef valueRef = sd.GetBinaryValue("fld");

            Assert.IsNotNull(valueRef);
            Assert.AreEqual(new BytesRef(value), valueRef);
            rd.Dispose();

            dir.Dispose();
        }
Example #22
 private IndexWriter InitIndex(IConcurrentMergeScheduler scheduler, Random random, bool initialCommit)
 {
     return InitIndex(scheduler, random, NewMockDirectory(random), initialCommit);
 }
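Example #23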
        /// <summary>
        /// Make sure that if the modifier tries to commit but hits disk-full, it
        /// remains consistent and usable. Similar to TestIndexReader.testDiskFull().
        /// </summary>
        private void DoTestOperationsOnDiskFull(IConcurrentMergeScheduler scheduler, bool updates)
        {
            Term searchTerm = new Term("content", "aaa");
            int START_COUNT = 157;
            int END_COUNT = 144;

            // First build up a starting index:
            MockDirectoryWrapper startDir = NewMockDirectory();
            // TODO: find the resource leak that only occurs sometimes here.
            startDir.NoDeleteOpenFile = false;
            IndexWriter writer = new IndexWriter(startDir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false)));
            for (int i = 0; i < 157; i++)
            {
                Document d = new Document();
                d.Add(NewStringField("id", Convert.ToString(i), Field.Store.YES));
                d.Add(NewTextField("content", "aaa " + i, Field.Store.NO));
                if (DefaultCodecSupportsDocValues())
                {
                    d.Add(new NumericDocValuesField("dv", i));
                }
                writer.AddDocument(d);
            }
            writer.Dispose();

            long diskUsage = startDir.SizeInBytes();
            long diskFree = diskUsage + 10;

            IOException err = null;

            bool done = false;

            // Iterate w/ ever increasing free disk space:
            while (!done)
            {
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: cycle");
                }
                MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new RAMDirectory(startDir, NewIOContext(Random())));
                dir.PreventDoubleWrite = false;
                dir.AllowRandomFileNotFoundException = false;

                var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false))
                                .SetMaxBufferedDocs(1000)
                                .SetMaxBufferedDeleteTerms(1000)
                                .SetMergeScheduler(scheduler);

                scheduler.SetSuppressExceptions();

                IndexWriter modifier = new IndexWriter(dir, config);

                // For each disk size, first try to commit against
                // dir that will hit random IOExceptions & disk
                // full; after, give it infinite disk space & turn
                // off random IOExceptions & retry w/ same reader:
                bool success = false;

                for (int x = 0; x < 2; x++)
                {
                    if (VERBOSE)
                    {
                        Console.WriteLine("TEST: x=" + x);
                    }

                    double rate = 0.1;
                    double diskRatio = ((double)diskFree) / diskUsage;
                    long thisDiskFree;
                    string testName;

                    if (0 == x)
                    {
                        thisDiskFree = diskFree;
                        if (diskRatio >= 2.0)
                        {
                            rate /= 2;
                        }
                        if (diskRatio >= 4.0)
                        {
                            rate /= 2;
                        }
                        if (diskRatio >= 6.0)
                        {
                            rate = 0.0;
                        }
                        if (VERBOSE)
                        {
                            Console.WriteLine("\ncycle: " + diskFree + " bytes");
                        }
                        testName = "disk full during reader.Dispose() @ " + thisDiskFree + " bytes";
                        dir.RandomIOExceptionRateOnOpen = Random().NextDouble() * 0.01;
                    }
                    else
                    {
                        thisDiskFree = 0;
                        rate = 0.0;
                        if (VERBOSE)
                        {
                            Console.WriteLine("\ncycle: same writer: unlimited disk space");
                        }
                        testName = "reader re-use after disk full";
                        dir.RandomIOExceptionRateOnOpen = 0.0;
                    }

                    dir.MaxSizeInBytes = thisDiskFree;
                    dir.RandomIOExceptionRate = rate;

                    try
                    {
                        if (0 == x)
                        {
                            int docId = 12;
                            for (int i = 0; i < 13; i++)
                            {
                                if (updates)
                                {
                                    Document d = new Document();
                                    d.Add(NewStringField("id", Convert.ToString(i), Field.Store.YES));
                                    d.Add(NewTextField("content", "bbb " + i, Field.Store.NO));
                                    if (DefaultCodecSupportsDocValues())
                                    {
                                        d.Add(new NumericDocValuesField("dv", i));
                                    }
                                    modifier.UpdateDocument(new Term("id", Convert.ToString(docId)), d);
                                }
                                else // deletes
                                {
                                    modifier.DeleteDocuments(new Term("id", Convert.ToString(docId)));
                                    // modifier.setNorm(docId, "contents", (float)2.0);
                                }
                                docId += 12;
                            }
                        }
                        modifier.Dispose();
                        success = true;
                        if (0 == x)
                        {
                            done = true;
                        }
                    }
                    catch (IOException e)
                    {
                        if (VERBOSE)
                        {
                            Console.WriteLine("  hit IOException: " + e);
                            Console.WriteLine(e.StackTrace);
                        }
                        err = e;
                        if (1 == x)
                        {
                            Console.WriteLine(e.ToString());
                            Console.Write(e.StackTrace);
                            Assert.Fail(testName + " hit IOException after disk space was freed up");
                        }
                    }
                    // prevent throwing a random exception here!!
                    double randomIOExceptionRate = dir.RandomIOExceptionRate;
                    long maxSizeInBytes = dir.MaxSizeInBytes;
                    dir.RandomIOExceptionRate = 0.0;
                    dir.RandomIOExceptionRateOnOpen = 0.0;
                    dir.MaxSizeInBytes = 0;
                    if (!success)
                    {
                        // Must force the close else the writer can have
                        // open files which cause exc in MockRAMDir.close
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: now rollback");
                        }
                        modifier.Rollback();
                    }

                    // If the close() succeeded, make sure there are
                    // no unreferenced files.
                    if (success)
                    {
                        TestUtil.CheckIndex(dir);
                        TestIndexWriter.AssertNoUnreferencedFiles(dir, "after writer.close");
                    }
                    dir.RandomIOExceptionRate = randomIOExceptionRate;
                    dir.MaxSizeInBytes = maxSizeInBytes;

                    // Finally, verify index is not corrupt, and, if
                    // we succeeded, we see all docs changed, and if
                    // we failed, we see either all docs or no docs
                    // changed (transactional semantics):
                    IndexReader newReader = null;
                    try
                    {
                        newReader = DirectoryReader.Open(dir);
                    }
                    catch (IOException e)
                    {
                        Console.WriteLine(e.ToString());
                        Console.Write(e.StackTrace);
                        Assert.Fail(testName + ":exception when creating IndexReader after disk full during close: " + e);
                    }

                    IndexSearcher searcher = NewSearcher(newReader);
                    ScoreDoc[] hits = null;
                    try
                    {
                        hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
                    }
                    catch (IOException e)
                    {
                        Console.WriteLine(e.ToString());
                        Console.Write(e.StackTrace);
                        Assert.Fail(testName + ": exception when searching: " + e);
                    }
                    int result2 = hits.Length;
                    if (success)
                    {
                        if (x == 0 && result2 != END_COUNT)
                        {
                            Assert.Fail(testName + ": method did not throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
                        }
                        else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT)
                        {
                            // It's possible that the first exception was
                            // "recoverable" wrt pending deletes, in which
                            // case the pending deletes are retained and
                            // then re-flushing (with plenty of disk
                            // space) will succeed in flushing the
                            // deletes:
                            Assert.Fail(testName + ": method did not throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
                        }
                    }
                    else
                    {
                        // On hitting exception we still may have added
                        // all docs:
                        if (result2 != START_COUNT && result2 != END_COUNT)
                        {
                            Console.WriteLine(err.ToString());
                            Console.Write(err.StackTrace);
                            Assert.Fail(testName + ": method did throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
                        }
                    }
                    newReader.Dispose();
                    if (result2 == END_COUNT)
                    {
                        break;
                    }
                }
                dir.Dispose();
                modifier.Dispose();

                // Try again with 10 more bytes of free space:
                diskFree += 10;
            }
            startDir.Dispose();
        }
Example #26
        public virtual void TestCloseWithThreads([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            int NUM_THREADS   = 3;
            int numIterations = TEST_NIGHTLY ? 7 : 3;

            for (int iter = 0; iter < numIterations; iter++)
            {
                if (VERBOSE)
                {
                    Console.WriteLine("\nTEST: iter=" + iter);
                }
                Directory dir    = NewDirectory();
                var       config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                                   .SetMaxBufferedDocs(10)
                                   .SetMergeScheduler(scheduler)
                                   .SetMergePolicy(NewLogMergePolicy(4));
                IndexWriter writer = new IndexWriter(dir, config);
                scheduler.SetSuppressExceptions();

                IndexerThread[] threads = new IndexerThread[NUM_THREADS];

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    threads[i] = new IndexerThread(writer, false, NewField);
                }

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    threads[i].Start();
                }

                bool done = false;
                while (!done)
                {
                    Thread.Sleep(100);
                    // only stop when at least one thread has added a doc
                    for (int i = 0; i < NUM_THREADS; i++)
                    {
                        if (threads[i].AddCount > 0)
                        {
                            done = true;
                            break;
                        }
                        else if (!threads[i].IsAlive)
                        {
                            Assert.Fail("thread failed before indexing a single document");
                        }
                    }
                }

                if (VERBOSE)
                {
                    Console.WriteLine("\nTEST: now close");
                }
                writer.Dispose(false);

                // Make sure threads that are adding docs are not hung:
                for (int i = 0; i < NUM_THREADS; i++)
                {
                    // Without fix for LUCENE-1130: one of the
                    // threads will hang
                    threads[i].Join();
                    if (threads[i].IsAlive)
                    {
                        Assert.Fail("thread seems to be hung");
                    }
                }

                // Quick test to make sure index is not corrupt:
                IndexReader reader = DirectoryReader.Open(dir);
                DocsEnum    tdocs  = TestUtil.Docs(Random(), reader, "field", new BytesRef("aaa"), MultiFields.GetLiveDocs(reader), null, 0);
                int         count  = 0;
                while (tdocs.NextDoc() != DocIdSetIterator.NO_MORE_DOCS)
                {
                    count++;
                }
                Assert.IsTrue(count > 0);
                reader.Dispose();

                dir.Dispose();
            }
        }
Example #27
 public virtual void TestIOExceptionDuringWriteSegmentWithThreadsOnlyOnce([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
 {
     TestMultipleThreadsFailure(scheduler, new FailOnlyInWriteSegment(true));
 }
Example #28
 public virtual void TestIOExceptionDuringWriteSegment([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
 {
     TestSingleThreadFailure(scheduler, new FailOnlyInWriteSegment(false));
 }
Example #29
 public virtual void TestIOExceptionDuringAbortWithThreads([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
 {
     TestMultipleThreadsFailure(scheduler, new FailOnlyOnAbortOrFlush(false));
 }
Example #30
 public virtual void TestIOExceptionDuringAbortOnlyOnce([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
 {
     TestSingleThreadFailure(scheduler, new FailOnlyOnAbortOrFlush(true));
 }
Example #32
        public virtual void Test2BTerms_Mem([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            if ("Lucene3x".Equals(Codec.Default.Name))
            {
                throw new Exception("this test cannot run with PreFlex codec");
            }
            Console.WriteLine("Starting Test2B");
            long TERM_COUNT = ((long)int.MaxValue) + 100000000;

            int TERMS_PER_DOC = TestUtil.NextInt(Random(), 100000, 1000000);

            IList<BytesRef> savedTerms = null;

            BaseDirectoryWrapper dir = NewFSDirectory(CreateTempDir("2BTerms"));

            //MockDirectoryWrapper dir = NewFSDirectory(new File("/p/lucene/indices/2bindex"));
            if (dir is MockDirectoryWrapper)
            {
                ((MockDirectoryWrapper)dir).Throttling = MockDirectoryWrapper.Throttling_e.NEVER;
            }
            dir.CheckIndexOnClose = false; // don't double-checkindex

            if (true)
            {
                IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                                                .SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
                                                .SetRAMBufferSizeMB(256.0)
                                                .SetMergeScheduler(scheduler)
                                                .SetMergePolicy(NewLogMergePolicy(false, 10))
                                                .SetOpenMode(IndexWriterConfig.OpenMode_e.CREATE));

                MergePolicy mp = w.Config.MergePolicy;
                if (mp is LogByteSizeMergePolicy)
                {
                    // 1 petabyte:
                    ((LogByteSizeMergePolicy)mp).MaxMergeMB = 1024 * 1024 * 1024;
                }

                Documents.Document doc = new Documents.Document();
                MyTokenStream      ts  = new MyTokenStream(Random(), TERMS_PER_DOC);

                FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
                customType.IndexOptions = FieldInfo.IndexOptions.DOCS_ONLY;
                customType.OmitNorms    = true;
                Field field = new Field("field", ts, customType);
                doc.Add(field);
                //w.setInfoStream(System.out);
                int numDocs = (int)(TERM_COUNT / TERMS_PER_DOC);

                Console.WriteLine("TERMS_PER_DOC=" + TERMS_PER_DOC);
                Console.WriteLine("numDocs=" + numDocs);

                for (int i = 0; i < numDocs; i++)
                {
                    long t0 = Environment.TickCount;
                    w.AddDocument(doc);
                    Console.WriteLine(i + " of " + numDocs + " " + (Environment.TickCount - t0) + " msec");
                }
                savedTerms = ts.SavedTerms;

                Console.WriteLine("TEST: full merge");
                w.ForceMerge(1);
                Console.WriteLine("TEST: close writer");
                w.Dispose();
            }

            Console.WriteLine("TEST: open reader");
            IndexReader r = DirectoryReader.Open(dir);

            if (savedTerms == null)
            {
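                // the build block above was skipped, so recover a sample of terms from the reader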
                savedTerms = FindTerms(r);
            }
            int numSavedTerms            = savedTerms.Count;
            IList <BytesRef> bigOrdTerms = new List <BytesRef>(savedTerms.SubList(numSavedTerms - 10, numSavedTerms));

            Console.WriteLine("TEST: test big ord terms...");
            TestSavedTerms(r, bigOrdTerms);
            Console.WriteLine("TEST: test all saved terms...");
            TestSavedTerms(r, savedTerms);
            r.Dispose();

            Console.WriteLine("TEST: now CheckIndex...");
            CheckIndex.Status status = TestUtil.CheckIndex(dir);
            long tc = status.SegmentInfos[0].TermIndexStatus.TermCount;

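            // after ForceMerge(1) the single remaining segment must report more than int.MaxValue terms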
            Assert.IsTrue(tc > int.MaxValue, "count " + tc + " is not > " + int.MaxValue);

            dir.Dispose();
            Console.WriteLine("TEST: done!");
        }
Beispiel #33
        // Runs the test with multiple threads, using the supplied
        // failure to trigger an IOException
        public virtual void TestMultipleThreadsFailure(IConcurrentMergeScheduler scheduler, MockDirectoryWrapper.Failure failure)
        {
            int NUM_THREADS = 3;

            for (int iter = 0; iter < 2; iter++)
            {
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: iter=" + iter);
                }
                MockDirectoryWrapper dir = NewMockDirectory();
                var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                                .SetMaxBufferedDocs(2)
                                .SetMergeScheduler(scheduler)
                                .SetMergePolicy(NewLogMergePolicy(4));
                IndexWriter writer = new IndexWriter(dir, config);
                scheduler.SetSuppressExceptions();

                IndexerThread[] threads = new IndexerThread[NUM_THREADS];

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    threads[i] = new IndexerThread(writer, true);
                }

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    threads[i].Start();
                }

                Thread.Sleep(10);

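                // give the indexer threads a head start, then arm the failure under concurrent load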
                dir.FailOn(failure);
                failure.SetDoFail();

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    threads[i].Join();
                    Assert.IsTrue(threads[i].Error == null, "hit unexpected exception");
                }

                bool success = false;
                try
                {
                    writer.Dispose(false);
                    success = true;
                }
                catch (IOException)
                {
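                    // the injected failure hit while closing; disarm it and retry the close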
                    failure.ClearDoFail();
                    writer.Dispose(false);
                }
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: success=" + success);
                }

                if (success)
                {
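                    // every live doc in the surviving index must yield its stored fields and term vectors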
                    IndexReader reader = DirectoryReader.Open(dir);
                    Bits delDocs = MultiFields.GetLiveDocs(reader);
                    for (int j = 0; j < reader.MaxDoc; j++)
                    {
                        if (delDocs == null || !delDocs.Get(j))
                        {
                            reader.Document(j);
                            reader.GetTermVectors(j);
                        }
                    }
                    reader.Dispose();
                }

                dir.Dispose();
            }
        }
Beispiel #34
        public virtual void TestVariableBinary([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
        {
            BaseDirectoryWrapper dir = NewFSDirectory(CreateTempDir("2BVariableBinary"));

            if (dir is MockDirectoryWrapper)
            {
                ((MockDirectoryWrapper)dir).Throttling = MockDirectoryWrapper.Throttling_e.NEVER;
            }

            var config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                         .SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
                         .SetRAMBufferSizeMB(256.0)
                         .SetMergeScheduler(scheduler)
                         .SetMergePolicy(NewLogMergePolicy(false, 10))
                         .SetOpenMode(IndexWriterConfig.OpenMode_e.CREATE);
            IndexWriter w = new IndexWriter(dir, config);

            Document             doc     = new Document();
            var                  bytes   = new byte[4];
            ByteArrayDataOutput  encoder = new ByteArrayDataOutput(bytes);
            BytesRef             data    = new BytesRef(bytes);
            BinaryDocValuesField dvField = new BinaryDocValuesField("dv", data);

            doc.Add(dvField);

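            // index ~2.1 billion docs; each doc's binary doc value is the vInt encoding of i % 65535 (1-3 bytes)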
            for (int i = 0; i < int.MaxValue; i++)
            {
                encoder.Reset(bytes);
                encoder.WriteVInt(i % 65535); // 1, 2, or 3 bytes
                data.Length = encoder.Position;
                w.AddDocument(doc);
                if (i % 100000 == 0)
                {
                    Console.WriteLine("indexed: " + i);
                    Console.Out.Flush();
                }
            }

            w.ForceMerge(1);
            w.Dispose();

            Console.WriteLine("verifying...");
            Console.Out.Flush();

            DirectoryReader    r             = DirectoryReader.Open(dir);
            int                expectedValue = 0;
            ByteArrayDataInput input         = new ByteArrayDataInput();

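            // walk every segment in doc order and check each stored value decodes back to its write-time counter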
            foreach (AtomicReaderContext context in r.Leaves)
            {
                AtomicReader    reader  = context.AtomicReader;
                BytesRef        scratch = new BytesRef(bytes);
                BinaryDocValues dv      = reader.GetBinaryDocValues("dv");
                for (int i = 0; i < reader.MaxDoc; i++)
                {
                    dv.Get(i, scratch);
                    input.Reset((byte[])(Array)scratch.Bytes, scratch.Offset, scratch.Length);
                    Assert.AreEqual(expectedValue % 65535, input.ReadVInt());
                    Assert.IsTrue(input.Eof());
                    expectedValue++;
                }
            }

            r.Dispose();
            dir.Dispose();
        }
Beispiel #35
        // Runs the test with multiple threads, using the supplied
        // failure to trigger an IOException
        public virtual void TestMultipleThreadsFailure(IConcurrentMergeScheduler scheduler, MockDirectoryWrapper.Failure failure)
        {
            int NUM_THREADS = 3;

            for (int iter = 0; iter < 2; iter++)
            {
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: iter=" + iter);
                }
                MockDirectoryWrapper dir = NewMockDirectory();
                var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
                             .SetMaxBufferedDocs(2)
                             .SetMergeScheduler(scheduler)
                             .SetMergePolicy(NewLogMergePolicy(4));
                IndexWriter writer = new IndexWriter(dir, config);
                scheduler.SetSuppressExceptions();

                IndexerThread[] threads = new IndexerThread[NUM_THREADS];

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    threads[i] = new IndexerThread(writer, true, NewField);
                }

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    threads[i].Start();
                }

                Thread.Sleep(10);

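                // give the indexer threads a head start, then arm the failure under concurrent load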
                dir.FailOn(failure);
                failure.SetDoFail();

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    threads[i].Join();
                    Assert.IsTrue(threads[i].Error == null, "hit unexpected exception");
                }

                bool success = false;
                try
                {
                    writer.Dispose(false);
                    success = true;
                }
                catch (IOException)
                {
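                    // the injected failure hit while closing; disarm it and retry the close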
                    failure.ClearDoFail();
                    writer.Dispose(false);
                }
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: success=" + success);
                }

                if (success)
                {
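                    // every live doc in the surviving index must yield its stored fields and term vectors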
                    IndexReader reader  = DirectoryReader.Open(dir);
                    Bits        delDocs = MultiFields.GetLiveDocs(reader);
                    for (int j = 0; j < reader.MaxDoc; j++)
                    {
                        if (delDocs == null || !delDocs.Get(j))
                        {
                            reader.Document(j);
                            reader.GetTermVectors(j);
                        }
                    }
                    reader.Dispose();
                }

                dir.Dispose();
            }
        }