Code example #1
        private void DoTestDgaps(int size, int count1, int count2)
        {
            MockDirectoryWrapper d = new MockDirectoryWrapper(Random(), new RAMDirectory());

            d.PreventDoubleWrite = false;
            BitVector bv = new BitVector(size);

            bv.InvertAll();
            for (int i = 0; i < count1; i++)
            {
                bv.Clear(i);
                Assert.AreEqual(i + 1, size - bv.Count());
            }
            bv.Write(d, "TESTBV", NewIOContext(Random()));
            // gradually increase number of set bits
            for (int i = count1; i < count2; i++)
            {
                BitVector bv2 = new BitVector(d, "TESTBV", NewIOContext(Random()));
                Assert.IsTrue(DoCompare(bv, bv2));
                bv = bv2;
                bv.Clear(i);
                Assert.AreEqual(i + 1, size - bv.Count());
                bv.Write(d, "TESTBV", NewIOContext(Random()));
            }
            // now start decreasing number of set bits
            for (int i = count2 - 1; i >= count1; i--)
            {
                BitVector bv2 = new BitVector(d, "TESTBV", NewIOContext(Random()));
                Assert.IsTrue(DoCompare(bv, bv2));
                bv = bv2;
                bv.Set(i);
                Assert.AreEqual(i, size - bv.Count());
                bv.Write(d, "TESTBV", NewIOContext(Random()));
            }
        }
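
A recurring pattern in these examples: a plain directory (usually a RAMDirectory) is wrapped in a MockDirectoryWrapper, and any test that rewrites the same file name in place (here "TESTBV", rewritten on every iteration) must first disable the wrapper's double-write check. A minimal sketch of the write/reload round-trip these BitVector tests repeat, assuming the usual LuceneTestCase helpers (Random(), NewIOContext()) are in scope:

    // MockDirectoryWrapper treats creating the same file name twice as a test
    // failure by default; opt out before rewriting "TESTBV" in place:
    MockDirectoryWrapper d = new MockDirectoryWrapper(Random(), new RAMDirectory());
    d.PreventDoubleWrite = false;

    BitVector bv = new BitVector(100);
    bv.Set(42);
    bv.Write(d, "TESTBV", NewIOContext(Random()));                            // persist
    BitVector reloaded = new BitVector(d, "TESTBV", NewIOContext(Random())); // reload
    // bv and reloaded should now compare bit-for-bit equal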
Code example #2
        public virtual void TestTransactions_Mem(
            [ValueSource(typeof(ConcurrentMergeSchedulers), "Values")]IConcurrentMergeScheduler scheduler1,
            [ValueSource(typeof(ConcurrentMergeSchedulers), "Values")]IConcurrentMergeScheduler scheduler2)
        {
            Console.WriteLine("Start test");
            // we can't use a non-RAM dir on Windows, because this test needs to double-write.
            MockDirectoryWrapper dir1 = new MockDirectoryWrapper(Random(), new RAMDirectory());
            MockDirectoryWrapper dir2 = new MockDirectoryWrapper(Random(), new RAMDirectory());
            dir1.PreventDoubleWrite = false;
            dir2.PreventDoubleWrite = false;
            dir1.FailOn(new RandomFailure(this));
            dir2.FailOn(new RandomFailure(this));
            dir1.FailOnOpenInput = false;
            dir2.FailOnOpenInput = false;

            // We throw exceptions in deleteFile, which creates
            // leftover files:
            dir1.AssertNoUnrefencedFilesOnClose = false;
            dir2.AssertNoUnrefencedFilesOnClose = false;

            InitIndex(dir1);
            InitIndex(dir2);

            TimedThread[] threads = new TimedThread[3];
            int numThread = 0;

            IndexerThread indexerThread = new IndexerThread(this, this, dir1, dir2, scheduler1, scheduler2, threads);
            threads[numThread++] = indexerThread;
            indexerThread.Start();

            SearcherThread searcherThread1 = new SearcherThread(this, dir1, dir2, threads);
            threads[numThread++] = searcherThread1;
            searcherThread1.Start();

            SearcherThread searcherThread2 = new SearcherThread(this, dir1, dir2, threads);
            threads[numThread++] = searcherThread2;
            searcherThread2.Start();

            for (int i = 0; i < numThread; i++)
            {
                threads[i].Join();
            }

            for (int i = 0; i < numThread; i++)
            {
                Assert.IsFalse(threads[i].Failed);
            }
            dir1.Dispose();
            dir2.Dispose();

            Console.WriteLine("End test");
        }
Code example #3
        private void DoTestWriteRead(int n)
        {
            MockDirectoryWrapper d = new MockDirectoryWrapper(Random(), new RAMDirectory());

            d.PreventDoubleWrite = false;
            BitVector bv = new BitVector(n);

            // test count when incrementally setting bits
            for (int i = 0; i < bv.Size(); i++)
            {
                Assert.IsFalse(bv.Get(i));
                Assert.AreEqual(i, bv.Count());
                bv.Set(i);
                Assert.IsTrue(bv.Get(i));
                Assert.AreEqual(i + 1, bv.Count());
                bv.Write(d, "TESTBV", NewIOContext(Random()));
                BitVector compare = new BitVector(d, "TESTBV", NewIOContext(Random()));
                // compare bit vectors with bits set incrementally
                Assert.IsTrue(DoCompare(bv, compare));
            }
        }
Code example #4
File: PayloadHelper.cs Project: Cefa68000/lucenenet
        /// <summary>
        /// Sets up a RAMDirectory and adds documents (using English.IntToEnglish()) with three fields:
        /// field, multiField, and noPayloadField, analyzed using the PayloadAnalyzer </summary>
        /// <param name="similarity"> The Similarity class to use in the Searcher </param>
        /// <param name="numDocs"> The num docs to add </param>
        /// <returns> An IndexSearcher </returns>
        // TODO: randomize
        public virtual IndexSearcher SetUp(Random random, Similarity similarity, int numDocs)
        {
            Directory directory = new MockDirectoryWrapper(random, new RAMDirectory());
            PayloadAnalyzer analyzer = new PayloadAnalyzer(this);

            // TODO randomize this
            IndexWriter writer = new IndexWriter(directory, (new IndexWriterConfig(LuceneTestCase.TEST_VERSION_CURRENT, analyzer)).SetSimilarity(similarity));
            // writer.infoStream = System.out;
            for (int i = 0; i < numDocs; i++)
            {
                Document doc = new Document();
                doc.Add(new TextField(FIELD, English.IntToEnglish(i), Field.Store.YES));
                doc.Add(new TextField(MULTI_FIELD, English.IntToEnglish(i) + "  " + English.IntToEnglish(i), Field.Store.YES));
                doc.Add(new TextField(NO_PAYLOAD_FIELD, English.IntToEnglish(i), Field.Store.YES));
                writer.AddDocument(doc);
            }
            Reader = DirectoryReader.Open(writer, true);
            writer.Dispose();

            IndexSearcher searcher = LuceneTestCase.NewSearcher(Reader);
            searcher.Similarity = similarity;
            return searcher;
        }
Code example #5
        public virtual void TestDgaps()
        {
            DoTestDgaps(1, 0, 1);
            DoTestDgaps(10, 0, 1);
            DoTestDgaps(100, 0, 1);
            DoTestDgaps(1000, 4, 7);
            DoTestDgaps(10000, 40, 43);
            DoTestDgaps(100000, 415, 418);
            DoTestDgaps(1000000, 3123, 3126);
            // now exercise skipping of fully populated bytes in the bitset (they are omitted if the bitset is sparse)
            MockDirectoryWrapper d = new MockDirectoryWrapper(Random(), new RAMDirectory());

            d.PreventDoubleWrite = false;
            BitVector bv = new BitVector(10000);

            bv.Set(0);
            for (int i = 8; i < 16; i++)
            {
                bv.Set(i);
            } // make sure we have one byte full of set bits
            for (int i = 32; i < 40; i++)
            {
                bv.Set(i);
            } // get a second byte full of set bits
            // add some more bits here
            for (int i = 40; i < 10000; i++)
            {
                if (Random().Next(1000) == 0)
                {
                    bv.Set(i);
                }
            }
            bv.Write(d, "TESTBV", NewIOContext(Random()));
            BitVector compare = new BitVector(d, "TESTBV", NewIOContext(Random()));

            Assert.IsTrue(DoCompare(bv, compare));
        }
Code example #6
        public static void BeforeClass()
        {
            Directory = NewDirectory();
            RandomIndexWriter writer = new RandomIndexWriter(Random(), Directory, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NewLogMergePolicy()));
            for (int i = 0; i < DocFields.Length; i++)
            {
                Document doc = new Document();
                doc.Add(NewTextField(field, DocFields[i], Field.Store.NO));
                writer.AddDocument(doc);
            }
            writer.Dispose();
            LittleReader = DirectoryReader.Open(Directory);
            Searcher = NewSearcher(LittleReader);
            // this is intentionally using the baseline sim, because it compares against bigSearcher (which uses a random one)
            Searcher.Similarity = new DefaultSimilarity();

            // Make big index
            Dir2 = new MockDirectoryWrapper(Random(), new RAMDirectory(Directory, IOContext.DEFAULT));

            // First multiply small test index:
            MulFactor = 1;
            int docCount = 0;
            if (VERBOSE)
            {
                Console.WriteLine("\nTEST: now copy index...");
            }
            do
            {
                if (VERBOSE)
                {
                    Console.WriteLine("\nTEST: cycle...");
                }
                Directory copy = new MockDirectoryWrapper(Random(), new RAMDirectory(Dir2, IOContext.DEFAULT));
                RandomIndexWriter w = new RandomIndexWriter(Random(), Dir2);
                w.AddIndexes(copy);
                docCount = w.MaxDoc();
                w.Dispose();
                MulFactor *= 2;
            } while (docCount < 3000);

            RandomIndexWriter riw = new RandomIndexWriter(Random(), Dir2, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(TestUtil.NextInt(Random(), 50, 1000)));
            Document doc_ = new Document();
            doc_.Add(NewTextField("field2", "xxx", Field.Store.NO));
            for (int i = 0; i < NUM_EXTRA_DOCS / 2; i++)
            {
                riw.AddDocument(doc_);
            }
            doc_ = new Document();
            doc_.Add(NewTextField("field2", "big bad bug", Field.Store.NO));
            for (int i = 0; i < NUM_EXTRA_DOCS / 2; i++)
            {
                riw.AddDocument(doc_);
            }
            Reader = riw.Reader;
            BigSearcher = NewSearcher(Reader);
            riw.Dispose();
        }
Code example #7
File: TestAddIndexes.cs Project: Cefa68000/lucenenet
                public override void Run()
                {
                    try
                    {
                        Directory[] dirs = new Directory[OuterInstance.NUM_COPY];
                        for (int k = 0; k < OuterInstance.NUM_COPY; k++)
                        {
                            dirs[k] = new MockDirectoryWrapper(Random(), new RAMDirectory(OuterInstance.Dir, NewIOContext(Random())));
                        }

                        int j = 0;

                        while (true)
                        {
                            // System.out.println(Thread.currentThread().getName() + ": iter j=" + j);
                            if (NumIter > 0 && j == NumIter)
                            {
                                break;
                            }
                            OuterInstance.DoBody(j++, dirs);
                        }
                    }
                    catch (Exception t)
                    {
                        OuterInstance.Handle(t);
                    }
                }
Code example #8
File: TestAddIndexes.cs Project: Cefa68000/lucenenet
        public virtual void TestNonCFSLeftovers()
        {
            Directory[] dirs = new Directory[2];
            for (int i = 0; i < dirs.Length; i++)
            {
                dirs[i] = new RAMDirectory();
                IndexWriter w = new IndexWriter(dirs[i], new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
                Document d = new Document();
                FieldType customType = new FieldType(TextField.TYPE_STORED);
                customType.StoreTermVectors = true;
                d.Add(new Field("c", "v", customType));
                w.AddDocument(d);
                w.Dispose();
            }

            IndexReader[] readers = new IndexReader[] { DirectoryReader.Open(dirs[0]), DirectoryReader.Open(dirs[1]) };

            Directory dir = new MockDirectoryWrapper(Random(), new RAMDirectory());
            IndexWriterConfig conf = (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).SetMergePolicy(NewLogMergePolicy(true));
            MergePolicy lmp = conf.MergePolicy;
            // Force creation of CFS:
            lmp.NoCFSRatio = 1.0;
            lmp.MaxCFSSegmentSizeMB = double.PositiveInfinity;
            IndexWriter w3 = new IndexWriter(dir, conf);
            w3.AddIndexes(readers);
            w3.Dispose();
            // we should now see segments_X,
            // segments.gen, _Y.cfs, _Y.cfe, _Z.si
            Assert.AreEqual(5, dir.ListAll().Length, "Only one compound segment should exist, but got: " + Arrays.ToString(dir.ListAll()));
            dir.Dispose();
        }
Code example #9
        public void TestBigDocuments()
        {
            // "big" as "much bigger than the chunk size"
            // for this test we force a FS dir
            // we can't just use newFSDirectory, because this test doesn't really index anything.
            // so if we get NRTCachingDir+SimpleText, we make massive stored fields and OOM (LUCENE-4484)
            Directory dir = new MockDirectoryWrapper(Random(), new MMapDirectory(CreateTempDir("testBigDocuments")));
            IndexWriterConfig iwConf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
            iwConf.SetMaxBufferedDocs(RandomInts.NextIntBetween(Random(), 2, 30));
            RandomIndexWriter iw = new RandomIndexWriter(Random(), dir, iwConf);

            if (dir is MockDirectoryWrapper)
            {
                ((MockDirectoryWrapper)dir).Throttling = MockDirectoryWrapper.Throttling_e.NEVER;
            }

            Document emptyDoc = new Document(); // emptyDoc
            Document bigDoc1 = new Document(); // lot of small fields
            Document bigDoc2 = new Document(); // 1 very big field

            Field idField = new StringField("id", "", Field.Store.NO);
            emptyDoc.Add(idField);
            bigDoc1.Add(idField);
            bigDoc2.Add(idField);

            FieldType onlyStored = new FieldType(StringField.TYPE_STORED);
            onlyStored.Indexed = false;

            Field smallField = new Field("fld", RandomByteArray(Random().Next(10), 256), onlyStored);
            int numFields = RandomInts.NextIntBetween(Random(), 500000, 1000000);
            for (int i = 0; i < numFields; ++i)
            {
                bigDoc1.Add(smallField);
            }

            Field bigField = new Field("fld", RandomByteArray(RandomInts.NextIntBetween(Random(), 1000000, 5000000), 2), onlyStored);
            bigDoc2.Add(bigField);

            int numDocs = AtLeast(5);
            Document[] docs = new Document[numDocs];
            for (int i = 0; i < numDocs; ++i)
            {
                docs[i] = RandomInts.RandomFrom(Random(), Arrays.AsList(emptyDoc, bigDoc1, bigDoc2));
            }
            for (int i = 0; i < numDocs; ++i)
            {
                idField.StringValue = "" + i;
                iw.AddDocument(docs[i]);
                if (Random().Next(numDocs) == 0)
                {
                    iw.Commit();
                }
            }
            iw.Commit();
            iw.ForceMerge(1); // look at what happens when big docs are merged
            DirectoryReader rd = DirectoryReader.Open(dir);
            IndexSearcher searcher = new IndexSearcher(rd);
            for (int i = 0; i < numDocs; ++i)
            {
                Query query = new TermQuery(new Term("id", "" + i));
                TopDocs topDocs = searcher.Search(query, 1);
                Assert.AreEqual(1, topDocs.TotalHits, "" + i);
                Document doc = rd.Document(topDocs.ScoreDocs[0].Doc);
                Assert.IsNotNull(doc);
                IndexableField[] fieldValues = doc.GetFields("fld");
                Assert.AreEqual(docs[i].GetFields("fld").Length, fieldValues.Length);
                if (fieldValues.Length > 0)
                {
                    Assert.AreEqual(docs[i].GetFields("fld")[0].BinaryValue(), fieldValues[0].BinaryValue());
                }
            }
            rd.Dispose();
            iw.Dispose();
            dir.Dispose();
        }
Code example #10
 public override void Eval(MockDirectoryWrapper dir)
 {
     if (DoFail && Random().Next() % 10 <= 3)
     {
         throw new IOException("now failing randomly but on purpose");
     }
 }
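
The Eval override above is the body of the RandomFailure hook that the transactions test in example #2 installs with dir1.FailOn(new RandomFailure(this)). MockDirectoryWrapper calls Eval on every registered Failure as it performs file operations, so throwing from Eval injects an I/O fault at exactly that point. A minimal, self-contained sketch of the whole pattern (the class name SimpleFailure is made up for illustration; DoFail, SetDoFail(), and ClearDoFail() are the Failure members these examples rely on):

    private class SimpleFailure : MockDirectoryWrapper.Failure
    {
        public override void Eval(MockDirectoryWrapper dir)
        {
            if (DoFail) // armed by SetDoFail(), disarmed by ClearDoFail()
            {
                throw new IOException("simulated I/O failure");
            }
        }
    }

    // Wiring, as the next example does with its failure parameter:
    // dir.FailOn(new SimpleFailure()); // register the hook
    // failure.SetDoFail();             // start injecting faults
    // failure.ClearDoFail();           // stop injecting faults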
Code example #11
        // Runs test, with one thread, using the specific failure
        // to trigger an IOException
        public virtual void _testSingleThreadFailure(MockDirectoryWrapper.Failure failure)
        {
            MockDirectoryWrapper dir = NewMockDirectory();

            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetMergeScheduler(new ConcurrentMergeScheduler()));
            Document doc = new Document();
            FieldType customType = new FieldType(TextField.TYPE_STORED);
            customType.StoreTermVectors = true;
            customType.StoreTermVectorPositions = true;
            customType.StoreTermVectorOffsets = true;
            doc.Add(NewField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType));

            for (int i = 0; i < 6; i++)
            {
                writer.AddDocument(doc);
            }

            dir.FailOn(failure);
            failure.SetDoFail();
            try
            {
                writer.AddDocument(doc);
                writer.AddDocument(doc);
                writer.Commit();
                Assert.Fail("did not hit exception");
            }
            catch (IOException)
            {
                // expected: the injected failure should surface before or at commit
            }
            failure.ClearDoFail();
            writer.AddDocument(doc);
            writer.Dispose(false);
            dir.Dispose();
        }
Code example #12
            public override void Eval(MockDirectoryWrapper dir)
            {
                // Since we throw exc during abort, eg when IW is
                // attempting to delete files, we will leave
                // leftovers:
                dir.AssertNoUnrefencedFilesOnClose = false;

                if (DoFail)
                {
                    var trace = new StackTrace();
                    bool sawAbortOrFlushDoc = false;
                    bool sawClose = false;
                    bool sawMerge = false;

                    foreach (var frame in trace.GetFrames())
                    {
                        var method = frame.GetMethod();
                        if (sawAbortOrFlushDoc && sawMerge && sawClose)
                        {
                            break;
                        }
                        if ("Abort".Equals(method.Name) || "FinishDocument".Equals(method.Name))
                        {
                            sawAbortOrFlushDoc = true;
                        }
                        if ("Merge".Equals(method.Name))
                        {
                            sawMerge = true;
                        }
                        if ("Close".Equals(method.Name) || "Dispose".Equals(method.Name))
                        {
                            sawClose = true;
                        }
                    }

                    if (sawAbortOrFlushDoc && !sawClose && !sawMerge)
                    {
                        if (OnlyOnce)
                        {
                            DoFail = false;
                        }
                        //System.out.println(Thread.currentThread().getName() + ": now fail");
                        //new Throwable(Console.WriteLine().StackTrace);
                        throw new IOException("now failing on purpose");
                    }
                }
            }
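
Several Eval overrides on this page share the same gating trick: walk the managed call stack with System.Diagnostics.StackTrace and throw only when particular method names (Abort, Flush, Merge, Close, ApplyDeletesAndUpdates, ...) appear in the chain, so the injected fault fires in exactly one code path. A stripped-down sketch of the gate:

    var trace = new StackTrace();
    bool inFlush = false;
    foreach (var frame in trace.GetFrames())
    {
        if ("Flush".Equals(frame.GetMethod().Name))
        {
            inFlush = true; // Eval was reached from somewhere inside a Flush call
            break;
        }
    }
    if (inFlush)
    {
        throw new IOException("now failing on purpose"); // fail only during flush
    }

Note the gate matches on method names alone; the commented-out type checks in later examples suggest the original Java version also matched the declaring class, which this port dropped.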
Code example #13
        public virtual void TestAddIndexOnDiskFull()
        {
            // MemoryCodec, since it uses FST, is not necessarily
            // "additive", ie if you add up N small FSTs, then merge
            // them, the merged result can easily be larger than the
            // sum because the merged FST may use array encoding for
            // some arcs (which uses more space):

            string idFormat = TestUtil.GetPostingsFormat("id");
            string contentFormat = TestUtil.GetPostingsFormat("content");
            AssumeFalse("this test cannot run with Memory codec", idFormat.Equals("Memory") || contentFormat.Equals("Memory"));

            int START_COUNT = 57;
            int NUM_DIR = TEST_NIGHTLY ? 50 : 5;
            int END_COUNT = START_COUNT + NUM_DIR * (TEST_NIGHTLY ? 25 : 5);

            // Build up a bunch of dirs that have indexes which we
            // will then merge together by calling addIndexes(*):
            Directory[] dirs = new Directory[NUM_DIR];
            long inputDiskUsage = 0;
            for (int i = 0; i < NUM_DIR; i++)
            {
                dirs[i] = NewDirectory();
                IndexWriter writer = new IndexWriter(dirs[i], NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
                for (int j = 0; j < 25; j++)
                {
                    AddDocWithIndex(writer, 25 * i + j);
                }
                writer.Dispose();
                string[] files = dirs[i].ListAll();
                for (int j = 0; j < files.Length; j++)
                {
                    inputDiskUsage += dirs[i].FileLength(files[j]);
                }
            }

            // Now, build a starting index that has START_COUNT docs.  We
            // will then try to addIndexes into a copy of this:
            MockDirectoryWrapper startDir = NewMockDirectory();
            IndexWriter indWriter = new IndexWriter(startDir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
            for (int j = 0; j < START_COUNT; j++)
            {
                AddDocWithIndex(indWriter, j);
            }
            indWriter.Dispose();

            // Make sure starting index seems to be working properly:
            Term searchTerm = new Term("content", "aaa");
            IndexReader reader = DirectoryReader.Open(startDir);
            Assert.AreEqual(57, reader.DocFreq(searchTerm), "first docFreq");

            IndexSearcher searcher = NewSearcher(reader);
            ScoreDoc[] hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
            Assert.AreEqual(57, hits.Length, "first number of hits");
            reader.Dispose();

            // Iterate with larger and larger amounts of free
            // disk space.  With little free disk space,
            // addIndexes will certainly run out of space &
            // fail.  Verify that when this happens, index is
            // not corrupt and index in fact has added no
            // documents.  Then, we increase disk space by 2000
            // bytes each iteration.  At some point there is
            // enough free disk space and addIndexes should
            // succeed and index should show all documents were
            // added.

            // String[] files = startDir.ListAll();
            long diskUsage = startDir.SizeInBytes();

            long startDiskUsage = 0;
            string[] files_ = startDir.ListAll();
            for (int i = 0; i < files_.Length; i++)
            {
                startDiskUsage += startDir.FileLength(files_[i]);
            }

            for (int iter = 0; iter < 3; iter++)
            {
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: iter=" + iter);
                }

                // Start with 50-200 bytes more than we are currently using:
                long diskFree = diskUsage + TestUtil.NextInt(Random(), 50, 200);

                int method = iter;

                bool success = false;
                bool done = false;

                string methodName;
                if (0 == method)
                {
                    methodName = "addIndexes(Directory[]) + forceMerge(1)";
                }
                else if (1 == method)
                {
                    methodName = "addIndexes(IndexReader[])";
                }
                else
                {
                    methodName = "addIndexes(Directory[])";
                }

                while (!done)
                {
                    if (VERBOSE)
                    {
                        Console.WriteLine("TEST: cycle...");
                    }

                    // Make a new dir that will enforce disk usage:
                    MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new RAMDirectory(startDir, NewIOContext(Random())));
                    indWriter = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.APPEND).SetMergePolicy(NewLogMergePolicy(false)));
                    IOException err = null;

                    IMergeScheduler ms = indWriter.Config.MergeScheduler;
                    for (int x = 0; x < 2; x++)
                    {
                        if (ms is IConcurrentMergeScheduler)
                        // this test intentionally produces exceptions
                        // in the threads that CMS launches; we don't
                        // want to pollute test output with these.
                        {
                            if (0 == x)
                            {
                                ((IConcurrentMergeScheduler)ms).SetSuppressExceptions();
                            }
                            else
                            {
                                ((IConcurrentMergeScheduler)ms).ClearSuppressExceptions();
                            }
                        }

                        // Two loops: first time, limit disk space &
                        // throw random IOExceptions; second time, no
                        // disk space limit:

                        double rate = 0.05;
                        double diskRatio = ((double)diskFree) / diskUsage;
                        long thisDiskFree;

                        string testName = null;

                        if (0 == x)
                        {
                            dir.RandomIOExceptionRateOnOpen = Random().NextDouble() * 0.01;
                            thisDiskFree = diskFree;
                            if (diskRatio >= 2.0)
                            {
                                rate /= 2;
                            }
                            if (diskRatio >= 4.0)
                            {
                                rate /= 2;
                            }
                            if (diskRatio >= 6.0)
                            {
                                rate = 0.0;
                            }
                            if (VERBOSE)
                            {
                                testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
                            }
                        }
                        else
                        {
                            dir.RandomIOExceptionRateOnOpen = 0.0;
                            thisDiskFree = 0;
                            rate = 0.0;
                            if (VERBOSE)
                            {
                                testName = "disk full test " + methodName + " with unlimited disk space";
                            }
                        }

                        if (VERBOSE)
                        {
                            Console.WriteLine("\ncycle: " + testName);
                        }

                        dir.TrackDiskUsage = true;
                        dir.MaxSizeInBytes = thisDiskFree;
                        dir.RandomIOExceptionRate = rate;

                        try
                        {
                            if (0 == method)
                            {
                                if (VERBOSE)
                                {
                                    Console.WriteLine("TEST: now addIndexes count=" + dirs.Length);
                                }
                                indWriter.AddIndexes(dirs);
                                if (VERBOSE)
                                {
                                    Console.WriteLine("TEST: now forceMerge");
                                }
                                indWriter.ForceMerge(1);
                            }
                            else if (1 == method)
                            {
                                IndexReader[] readers = new IndexReader[dirs.Length];
                                for (int i = 0; i < dirs.Length; i++)
                                {
                                    readers[i] = DirectoryReader.Open(dirs[i]);
                                }
                                try
                                {
                                    indWriter.AddIndexes(readers);
                                }
                                finally
                                {
                                    for (int i = 0; i < dirs.Length; i++)
                                    {
                                        readers[i].Dispose();
                                    }
                                }
                            }
                            else
                            {
                                indWriter.AddIndexes(dirs);
                            }

                            success = true;
                            if (VERBOSE)
                            {
                                Console.WriteLine("  success!");
                            }

                            if (0 == x)
                            {
                                done = true;
                            }
                        }
                        catch (IOException e)
                        {
                            success = false;
                            err = e;
                            if (VERBOSE)
                            {
                                Console.WriteLine("  hit IOException: " + e);
                                Console.WriteLine(e.StackTrace);
                            }

                            if (1 == x)
                            {
                                Console.WriteLine(e.StackTrace);
                                Assert.Fail(methodName + " hit IOException after disk space was freed up");
                            }
                        }

                        // Make sure all threads from
                        // ConcurrentMergeScheduler are done
                        TestUtil.SyncConcurrentMerges(indWriter);

                        if (VERBOSE)
                        {
                            Console.WriteLine("  now test readers");
                        }

                        // Finally, verify index is not corrupt, and, if
                        // we succeeded, we see all docs added, and if we
                        // failed, we see either all docs or no docs added
                        // (transactional semantics):
                        dir.RandomIOExceptionRateOnOpen = 0.0;
                        try
                        {
                            reader = DirectoryReader.Open(dir);
                        }
                        catch (IOException e)
                        {
                            Console.WriteLine(e.StackTrace);
                            Assert.Fail(testName + ": exception when creating IndexReader: " + e);
                        }
                        int result = reader.DocFreq(searchTerm);
                        if (success)
                        {
                            if (result != START_COUNT)
                            {
                                Assert.Fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT);
                            }
                        }
                        else
                        {
                            // On hitting exception we still may have added
                            // all docs:
                            if (result != START_COUNT && result != END_COUNT)
                            {
                                Console.WriteLine(err.StackTrace);
                                Assert.Fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
                            }
                        }

                        searcher = NewSearcher(reader);
                        try
                        {
                            hits = searcher.Search(new TermQuery(searchTerm), null, END_COUNT).ScoreDocs;
                        }
                        catch (IOException e)
                        {
                            Console.WriteLine(e.StackTrace);
                            Assert.Fail(testName + ": exception when searching: " + e);
                        }
                        int result2 = hits.Length;
                        if (success)
                        {
                            if (result2 != result)
                            {
                                Assert.Fail(testName + ": method did not throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + result);
                            }
                        }
                        else
                        {
                            // On hitting exception we still may have added
                            // all docs:
                            if (result2 != result)
                            {
                                Console.WriteLine(err.StackTrace);
                                Assert.Fail(testName + ": method did throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + result);
                            }
                        }

                        reader.Dispose();
                        if (VERBOSE)
                        {
                            Console.WriteLine("  count is " + result);
                        }

                        if (done || result == END_COUNT)
                        {
                            break;
                        }
                    }

                    if (VERBOSE)
                    {
                        Console.WriteLine("  start disk = " + startDiskUsage + "; input disk = " + inputDiskUsage + "; max used = " + dir.MaxUsedSizeInBytes);
                    }

                    if (done)
                    {
                        // Javadocs state that temp free Directory space
                        // required is at most 2X total input size of
                        // indices so let's make sure:
                        Assert.IsTrue((dir.MaxUsedSizeInBytes - startDiskUsage) < 2 * (startDiskUsage + inputDiskUsage), "max free Directory space required exceeded 2X the total input index sizes during " + methodName + ": max temp usage = " + (dir.MaxUsedSizeInBytes - startDiskUsage) + " bytes vs limit=" + (2 * (startDiskUsage + inputDiskUsage)) + "; starting disk usage = " + startDiskUsage + " bytes; " + "input index disk usage = " + inputDiskUsage + " bytes");
                    }

                    // Make sure we don't hit disk full during close below:
                    dir.MaxSizeInBytes = 0;
                    dir.RandomIOExceptionRate = 0.0;
                    dir.RandomIOExceptionRateOnOpen = 0.0;

                    indWriter.Dispose();

                    // Wait for all BG threads to finish else
                    // dir.Dispose() will throw IOException because
                    // there are still open files
                    TestUtil.SyncConcurrentMerges(ms);

                    dir.Dispose();

                    // Try again with more free space:
                    diskFree += TEST_NIGHTLY ? TestUtil.NextInt(Random(), 4000, 8000) : TestUtil.NextInt(Random(), 40000, 80000);
                }
            }

            startDir.Dispose();
            foreach (Directory dir in dirs)
            {
                dir.Dispose();
            }
        }
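
The disk-full simulation above rests on a small set of MockDirectoryWrapper knobs that reappear in TestAddDocumentOnDiskFull below. A minimal sketch of how they combine (the values are illustrative only):

    MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new RAMDirectory());
    dir.TrackDiskUsage = true;              // record MaxUsedSizeInBytes for later assertions
    dir.MaxSizeInBytes = 200;               // simulate "disk full" once the directory grows past this
    dir.RandomIOExceptionRate = 0.05;       // additionally fail a fraction of I/O calls at random
    dir.RandomIOExceptionRateOnOpen = 0.01; // ...and a fraction of file opens

    // Before closing, lift every limit so Dispose() itself cannot hit a fake failure:
    dir.MaxSizeInBytes = 0;
    dir.RandomIOExceptionRate = 0.0;
    dir.RandomIOExceptionRateOnOpen = 0.0;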
Code example #14
        public virtual void Test()
        {
            MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new MMapDirectory(CreateTempDir("4GBStoredFields")));
            dir.Throttling = MockDirectoryWrapper.Throttling_e.NEVER;

            IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH).SetRAMBufferSizeMB(256.0).SetMergeScheduler(new ConcurrentMergeScheduler()).SetMergePolicy(NewLogMergePolicy(false, 10)).SetOpenMode(IndexWriterConfig.OpenMode_e.CREATE));

            MergePolicy mp = w.Config.MergePolicy;
            if (mp is LogByteSizeMergePolicy)
            {
                // 1 petabyte:
                ((LogByteSizeMergePolicy)mp).MaxMergeMB = 1024 * 1024 * 1024;
            }

            Document doc = new Document();
            FieldType ft = new FieldType();
            ft.Indexed = false;
            ft.Stored = true;
            ft.Freeze();
            int valueLength = RandomInts.NextIntBetween(Random(), 1 << 13, 1 << 20);
            var value = new byte[valueLength];
            for (int i = 0; i < valueLength; ++i)
            {
                // random so that even compressing codecs can't compress it
                value[i] = (byte)Random().Next(256);
            }
            Field f = new Field("fld", value, ft);
            doc.Add(f);

            int numDocs = (int)((1L << 32) / valueLength + 100);
            for (int i = 0; i < numDocs; ++i)
            {
                w.AddDocument(doc);
                if (VERBOSE && i % (numDocs / 10) == 0)
                {
                    Console.WriteLine(i + " of " + numDocs + "...");
                }
            }
            w.ForceMerge(1);
            w.Dispose();
            if (VERBOSE)
            {
                bool found = false;
                foreach (string file in dir.ListAll())
                {
                    if (file.EndsWith(".fdt"))
                    {
                        long fileLength = dir.FileLength(file);
                        if (fileLength >= 1L << 32)
                        {
                            found = true;
                        }
                        Console.WriteLine("File length of " + file + " : " + fileLength);
                    }
                }
                if (!found)
                {
                    Console.WriteLine("No .fdt file larger than 4GB, test bug?");
                }
            }

            DirectoryReader rd = DirectoryReader.Open(dir);
            Document sd = rd.Document(numDocs - 1);
            Assert.IsNotNull(sd);
            Assert.AreEqual(1, sd.Fields.Count);
            BytesRef valueRef = sd.GetBinaryValue("fld");
            Assert.IsNotNull(valueRef);
            Assert.AreEqual(new BytesRef(value), valueRef);
            rd.Dispose();

            dir.Dispose();
        }
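
A quick check of the arithmetic behind numDocs above (not part of the original test): the document count is chosen so the stored field data alone must cross the 2^32-byte mark, which is why the verbose block then looks for a .fdt file of 4 GB or more.

    // Worst case: the smallest permitted field value, 1 << 13 = 8,192 bytes.
    // (1L << 32) / 8192 = 524,288 documents land exactly on 4 GiB,
    // and the "+ 100" adds roughly 800 KB of slack past the boundary.
    int valueLength = 1 << 13;
    int numDocs = (int)((1L << 32) / valueLength + 100); // 524,388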
Code example #15
            public override void Eval(MockDirectoryWrapper dir)
            {
                if (DoFail && TestThread())
                {
                    bool isDoFlush = false;
                    bool isClose = false;
                    var trace = new StackTrace();
                    foreach (var frame in trace.GetFrames())
                    {
                        var method = frame.GetMethod();
                        if (isDoFlush && isClose)
                        {
                            break;
                        }
                        if ("flush".Equals(method.Name))
                        {
                            isDoFlush = true;
                        }
                        if ("close".Equals(method.Name))
                        {
                            isClose = true;
                        }
                    }

                    if (isDoFlush && !isClose && Random().NextBoolean())
                    {
                        HitExc = true;
                        throw new IOException(Thread.CurrentThread.Name + ": now failing during flush");
                    }
                }
            }
Code example #16
        public virtual void TestAddDocumentOnDiskFull()
        {
            for (int pass = 0; pass < 2; pass++)
            {
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: pass=" + pass);
                }
                bool doAbort = pass == 1;
                long diskFree = TestUtil.NextInt(Random(), 100, 300);
                while (true)
                {
                    if (VERBOSE)
                    {
                        Console.WriteLine("TEST: cycle: diskFree=" + diskFree);
                    }
                    MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new RAMDirectory());
                    dir.MaxSizeInBytes = diskFree;
                    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
                    IMergeScheduler ms = writer.Config.MergeScheduler;
                    if (ms is IConcurrentMergeScheduler)
                    {
                        // this test intentionally produces exceptions
                        // in the threads that CMS launches; we don't
                        // want to pollute test output with these.
                        ((IConcurrentMergeScheduler)ms).SetSuppressExceptions();
                    }

                    bool hitError = false;
                    try
                    {
                        for (int i = 0; i < 200; i++)
                        {
                            AddDoc(writer);
                        }
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: done adding docs; now commit");
                        }
                        writer.Commit();
                    }
                    catch (IOException e)
                    {
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: exception on addDoc");
                            Console.WriteLine(e.StackTrace);
                        }
                        hitError = true;
                    }

                    if (hitError)
                    {
                        if (doAbort)
                        {
                            if (VERBOSE)
                            {
                                Console.WriteLine("TEST: now rollback");
                            }
                            writer.Rollback();
                        }
                        else
                        {
                            try
                            {
                                if (VERBOSE)
                                {
                                    Console.WriteLine("TEST: now close");
                                }
                                writer.Dispose();
                            }
                            catch (IOException e)
                            {
                                if (VERBOSE)
                                {
                                    Console.WriteLine("TEST: exception on close; retry w/ no disk space limit");
                                    Console.WriteLine(e.StackTrace);
                                }
                                dir.MaxSizeInBytes = 0;
                                writer.Dispose();
                            }
                        }

                        //TestUtil.SyncConcurrentMerges(ms);

                        if (TestUtil.AnyFilesExceptWriteLock(dir))
                        {
                            TestIndexWriter.AssertNoUnreferencedFiles(dir, "after disk full during addDocument");

                            // Make sure reader can open the index:
                            DirectoryReader.Open(dir).Dispose();
                        }

                        dir.Dispose();
                        // Now try again w/ more space:

                        diskFree += TEST_NIGHTLY ? TestUtil.NextInt(Random(), 400, 600) : TestUtil.NextInt(Random(), 3000, 5000);
                    }
                    else
                    {
                        //TestUtil.SyncConcurrentMerges(writer);
                        dir.MaxSizeInBytes = 0;
                        writer.Dispose();
                        dir.Dispose();
                        break;
                    }
                }
            }
        }
Code example #17
            public override void Eval(MockDirectoryWrapper dir)
            {
                if (!DoFail)
                {
                    return;
                }

                var trace = new StackTrace();
                foreach (var frame in trace.GetFrames())
                {
                    var method = frame.GetMethod();
                    if (/*typeof(SegmentMerger).Name.Equals(frame.GetType().Name) && */"MergeTerms".Equals(method.Name) && !DidFail1)
                    {
                        DidFail1 = true;
                        throw new IOException("fake disk full during mergeTerms");
                    }
                    if (/*typeof(LiveDocsFormat).Name.Equals(frame.GetType().Name) && */"WriteLiveDocs".Equals(method.Name) && !DidFail2)
                    {
                        DidFail2 = true;
                        throw new IOException("fake disk full while writing LiveDocs");
                    }
                }
            }
Code example #18
        // Runs test, with multiple threads, using the specific
        // failure to trigger an IOException
        public virtual void _testMultipleThreadsFailure(MockDirectoryWrapper.Failure failure)
        {
            int NUM_THREADS = 3;

            for (int iter = 0; iter < 2; iter++)
            {
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: iter=" + iter);
                }
                MockDirectoryWrapper dir = NewMockDirectory();

                IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetMergeScheduler(new ConcurrentMergeScheduler()).SetMergePolicy(NewLogMergePolicy(4)));
                ((ConcurrentMergeScheduler)writer.Config.MergeScheduler).SetSuppressExceptions();

                IndexerThread[] threads = new IndexerThread[NUM_THREADS];

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    threads[i] = new IndexerThread(writer, true);
                }

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    threads[i].Start();
                }

                Thread.Sleep(10);

                dir.FailOn(failure);
                failure.SetDoFail();

                for (int i = 0; i < NUM_THREADS; i++)
                {
                    threads[i].Join();
                    Assert.IsTrue(threads[i].Error == null, "hit unexpected Throwable");
                }

                bool success = false;
                try
                {
                    writer.Dispose(false);
                    success = true;
                }
                catch (IOException)
                {
                    // the injected failure can surface at close too; disarm it and retry
                    failure.ClearDoFail();
                    writer.Dispose(false);
                }
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: success=" + success);
                }

                if (success)
                {
                    IndexReader reader = DirectoryReader.Open(dir);
                    Bits delDocs = MultiFields.GetLiveDocs(reader);
                    for (int j = 0; j < reader.MaxDoc; j++)
                    {
                        if (delDocs == null || !delDocs.Get(j))
                        {
                            reader.Document(j);
                            reader.GetTermVectors(j);
                        }
                    }
                    reader.Dispose();
                }

                dir.Dispose();
            }
        }
Code example #19
 public override void Run()
 {
     try
     {
         Directory[] dirs = new Directory[OuterInstance.NumDirs];
         for (int k = 0; k < OuterInstance.NumDirs; k++)
         {
             dirs[k] = new MockDirectoryWrapper(Random(), new RAMDirectory(OuterInstance.AddDir, NewIOContext(Random())));
         }
         //int j = 0;
         //while (true) {
         // System.out.println(Thread.currentThread().getName() + ": iter
         // j=" + j);
         for (int x = 0; x < NumIter; x++)
         {
             // only do addIndexes
             OuterInstance.DoBody(x, dirs);
         }
         //if (numIter > 0 && j == numIter)
         //  break;
         //doBody(j++, dirs);
         //doBody(5, dirs);
         //}
     }
     catch (Exception t)
     {
         OuterInstance.Handle(t);
     }
 }
Code example #20
 public override void Eval(MockDirectoryWrapper dir)
 {
     if (DoFail)
     {
         var trace = new StackTrace();
         foreach (var frame in trace.GetFrames())
         {
             var method = frame.GetMethod();
             if ("Flush".Equals(method.Name) /*&& "Lucene.Net.Index.DocFieldProcessor".Equals(frame.GetType().Name)*/)
             {
                 if (OnlyOnce)
                 {
                     DoFail = false;
                 }
                 //System.out.println(Thread.currentThread().getName() + ": NOW FAIL: onlyOnce=" + onlyOnce);
                 //new Throwable(Console.WriteLine().StackTrace);
                 throw new IOException("now failing on purpose");
             }
         }
     }
 }
Code example #21
 public override void Eval(MockDirectoryWrapper dir)
 {
     var trace = new StackTrace();
     if (ShouldFail.Get())
     {
         foreach (var frame in trace.GetFrames())
         {
             var method = frame.GetMethod();
             if ("GetReadOnlyClone".Equals(method.Name))
             {
                 if (VERBOSE)
                 {
                     Console.WriteLine("TEST: now fail; exc:");
                     Console.WriteLine((new Exception()).StackTrace);
                 }
                 ShouldFail.Set(false);
                 throw new FakeIOException();
             }
         }
     }
 }
Code example #22
File: QueryUtils.cs Project: WakeflyCBass/lucenenet
 private static IndexReader MakeEmptyIndex(Random random, int numDocs)
 {
     Debug.Assert(numDocs > 0);
     Directory d = new MockDirectoryWrapper(random, new RAMDirectory());
     IndexWriter w = new IndexWriter(d, new IndexWriterConfig(LuceneTestCase.TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     for (int i = 0; i < numDocs; i++)
     {
         w.AddDocument(new Document());
     }
     w.ForceMerge(1);
     w.Commit();
     w.Dispose();
     DirectoryReader reader = DirectoryReader.Open(d);
     return new AllDeletedFilterReader(LuceneTestCase.GetOnlySegmentReader(reader));
 }
Code example #23
        public virtual void TestDuringAddIndexes()
        {
            Directory dir1 = GetAssertNoDeletesDirectory(NewDirectory());
            IndexWriter writer = new IndexWriter(dir1, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NewLogMergePolicy(2)));

            // create the index
            CreateIndexNoClose(false, "test", writer);
            writer.Commit();

            Directory[] dirs = new Directory[10];
            for (int i = 0; i < 10; i++)
            {
                dirs[i] = new MockDirectoryWrapper(Random(), new RAMDirectory(dir1, NewIOContext(Random())));
            }

            DirectoryReader r = writer.Reader;

            const float SECONDS = 0.5f;

            long endTime = (long)(Environment.TickCount + 1000.0 * SECONDS);
            IList<Exception> excs = new SynchronizedCollection<Exception>();

            // Only one thread can addIndexes at a time, because
            // IndexWriter acquires a write lock in each directory:
            var threads = new ThreadClass[1];
            for (int i = 0; i < threads.Length; i++)
            {
                threads[i] = new ThreadAnonymousInnerClassHelper(writer, dirs, endTime, excs);
                threads[i].SetDaemon(true);
                threads[i].Start();
            }

            int lastCount = 0;
            while (Environment.TickCount < endTime)
            {
                DirectoryReader r2 = DirectoryReader.OpenIfChanged(r);
                if (r2 != null)
                {
                    r.Dispose();
                    r = r2;
                }
                Query q = new TermQuery(new Term("indexname", "test"));
                IndexSearcher searcher = NewSearcher(r);
                int count = searcher.Search(q, 10).TotalHits;
                Assert.IsTrue(count >= lastCount);
                lastCount = count;
            }

            for (int i = 0; i < threads.Length; i++)
            {
                threads[i].Join();
            }
            // final check
            DirectoryReader dr2 = DirectoryReader.OpenIfChanged(r);
            if (dr2 != null)
            {
                r.Dispose();
                r = dr2;
            }
            Query q2 = new TermQuery(new Term("indexname", "test"));
            IndexSearcher searcher_ = NewSearcher(r);
            int count_ = searcher_.Search(q2, 10).TotalHits;
            Assert.IsTrue(count_ >= lastCount);

            Assert.AreEqual(0, excs.Count);
            r.Dispose();
            if (dir1 is MockDirectoryWrapper)
            {
                ICollection<string> openDeletedFiles = ((MockDirectoryWrapper)dir1).OpenDeletedFiles;
                Assert.AreEqual(0, openDeletedFiles.Count, "openDeleted=" + openDeletedFiles);
            }

            writer.Dispose();

            dir1.Dispose();
        }
Code example #24
File: TestCrash.cs Project: WakeflyCBass/lucenenet
        private IndexWriter InitIndex(Random random, MockDirectoryWrapper dir, bool initialCommit)
        {
            dir.LockFactory = NoLockFactory.DoNoLockFactory;

            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).SetMaxBufferedDocs(10).SetMergeScheduler(new ConcurrentMergeScheduler()));
            ((ConcurrentMergeScheduler)writer.Config.MergeScheduler).SetSuppressExceptions();
            if (initialCommit)
            {
                writer.Commit();
            }

            Document doc = new Document();
            doc.Add(NewTextField("content", "aaa", Field.Store.NO));
            doc.Add(NewTextField("id", "0", Field.Store.NO));
            for (int i = 0; i < 157; i++)
            {
                writer.AddDocument(doc);
            }

            return writer;
        }
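
A hedged sketch of how a crash test might drive this helper: MockDirectoryWrapper.Crash() discards unsynced files to emulate a hard power loss, and the 157 in the assertion mirrors the document count added above.

    IndexWriter writer = InitIndex(Random(), dir, true);
    dir.Crash(); // "power loss": drop everything not yet synced
    // The writer is intentionally abandoned; the crash invalidates it.
    // The index must still open cleanly, just with fewer documents.
    IndexReader reader = DirectoryReader.Open(dir);
    Assert.IsTrue(reader.NumDocs < 157);
    reader.Dispose();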
Code Example #27
            public override void Eval(MockDirectoryWrapper dir)
            {
                if (Thread.CurrentThread != thread)
                {
                    // don't fail during merging
                    return;
                }
                if (sawMaybe && !failed)
                {
                    bool seen = false;
                    var trace = new StackTrace();
                    foreach (var frame in trace.GetFrames())
                    {
                        var method = frame.GetMethod();
                        if ("ApplyDeletesAndUpdates".Equals(method.Name) || "SlowFileExists".Equals(method.Name))
                        {
                            seen = true;
                            break;
                        }
                    }

                    if (!seen)
                    {
                        // Only fail once we are no longer in applyDeletes
                        failed = true;
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: mock failure: now fail");
                            Console.WriteLine((new Exception()).StackTrace);
                        }
                        throw new IOException("fail after applyDeletes");
                    }
                }
                if (!failed)
                {
                    var trace = new StackTrace();
                    foreach (var frame in trace.GetFrames())
                    {
                        var method = frame.GetMethod();
                        if ("ApplyDeletesAndUpdates".Equals(method.Name))
                        {
                            if (VERBOSE)
                            {
                                Console.WriteLine("TEST: mock failure: saw applyDeletes");
                                Console.WriteLine((new Exception()).StackTrace);
                            }
                            sawMaybe = true;
                            break;
                        }
                    }
                }
            }
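
The override above is a two-phase state machine: it first flips sawMaybe while ApplyDeletesAndUpdates is on the stack, then throws only once the writer has moved past that method. A hedged sketch of the surrounding wiring; the subclass name and field setup are assumptions, while FailOn itself is the standard MockDirectoryWrapper hook:

    // Assumed shape of the enclosing Failure subclass:
    private class FailAfterApplyDeletes : MockDirectoryWrapper.Failure
    {
        internal Thread thread;  // only fail on the indexing thread
        internal bool sawMaybe;  // phase 1: observed ApplyDeletesAndUpdates
        internal bool failed;    // phase 2: threw once; stay quiet after
        // ... Eval as shown above ...
    }

    // Registration: the mock invokes Eval on each file operation it sees.
    dir.FailOn(new FailAfterApplyDeletes { thread = Thread.CurrentThread });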
Code Example #28
 public override void Eval(MockDirectoryWrapper dir)
 {
     var trace = new StackTrace();
     foreach (var frame in trace.GetFrames())
     {
         var method = frame.GetMethod();
         if ("DoMerge".Equals(method.Name))
         {
             throw new IOException("now failing during merge");
         }
     }
 }
Code Example #29
 public override void Eval(MockDirectoryWrapper dir)
 {
     if (!failed)
     {
         failed = true;
         throw new IOException("fail in add doc");
     }
 }
Code Example #30
 private void DoTestWriteRead(int n)
 {
     MockDirectoryWrapper d = new MockDirectoryWrapper(Random(), new RAMDirectory());
     d.PreventDoubleWrite = false;
     BitVector bv = new BitVector(n);
     // test count when incrementally setting bits
     for (int i = 0; i < bv.Size(); i++)
     {
         Assert.IsFalse(bv.Get(i));
         Assert.AreEqual(i, bv.Count());
         bv.Set(i);
         Assert.IsTrue(bv.Get(i));
         Assert.AreEqual(i + 1, bv.Count());
         bv.Write(d, "TESTBV", NewIOContext(Random()));
         BitVector compare = new BitVector(d, "TESTBV", NewIOContext(Random()));
         // compare bit vectors with bits set incrementally
         Assert.IsTrue(DoCompare(bv, compare));
     }
 }
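
The invariants this loop exercises, in isolation: Size() is the fixed capacity in bits, Count() tracks the number of set bits, and a write/read round trip must preserve both. A small sketch with illustrative values:

    BitVector bv = new BitVector(8); // 8 bits, all clear
    Assert.AreEqual(8, bv.Size());   // capacity never changes
    Assert.AreEqual(0, bv.Count());  // no bits set yet
    bv.Set(3);
    Assert.IsTrue(bv.Get(3));
    Assert.AreEqual(1, bv.Count());
    bv.Clear(3);
    Assert.AreEqual(0, bv.Count());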
Code Example #31
        /// <summary>
        /// Make sure if modifier tries to commit but hits disk full that modifier
        /// remains consistent and usable. Similar to TestIndexReader.testDiskFull().
        /// </summary>
        private void DoTestOperationsOnDiskFull(bool updates)
        {
            Term searchTerm = new Term("content", "aaa");
            int START_COUNT = 157;
            int END_COUNT = 144;

            // First build up a starting index:
            MockDirectoryWrapper startDir = NewMockDirectory();
            // TODO: find the resource leak that only occurs sometimes here.
            startDir.NoDeleteOpenFile = false;
            IndexWriter writer = new IndexWriter(startDir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false)));
            for (int i = 0; i < 157; i++)
            {
                Document d = new Document();
                d.Add(NewStringField("id", Convert.ToString(i), Field.Store.YES));
                d.Add(NewTextField("content", "aaa " + i, Field.Store.NO));
                if (DefaultCodecSupportsDocValues())
                {
                    d.Add(new NumericDocValuesField("dv", i));
                }
                writer.AddDocument(d);
            }
            writer.Dispose();

            long diskUsage = startDir.SizeInBytes();
            long diskFree = diskUsage + 10;

            IOException err = null;

            bool done = false;

            // Iterate w/ ever increasing free disk space:
            while (!done)
            {
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: cycle");
                }
                MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new RAMDirectory(startDir, NewIOContext(Random())));
                dir.PreventDoubleWrite = false;
                dir.AllowRandomFileNotFoundException = false;
                IndexWriter modifier = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false)).SetMaxBufferedDocs(1000).SetMaxBufferedDeleteTerms(1000).SetMergeScheduler(new ConcurrentMergeScheduler()));
                ((ConcurrentMergeScheduler)modifier.Config.MergeScheduler).SetSuppressExceptions();

                // For each disk size, first try to commit against
                // dir that will hit random IOExceptions & disk
                // full; after, give it infinite disk space & turn
                // off random IOExceptions & retry w/ same reader:
                bool success = false;

                for (int x = 0; x < 2; x++)
                {
                    if (VERBOSE)
                    {
                        Console.WriteLine("TEST: x=" + x);
                    }

                    double rate = 0.1;
                    double diskRatio = ((double)diskFree) / diskUsage;
                    long thisDiskFree;
                    string testName;

                    if (0 == x)
                    {
                        thisDiskFree = diskFree;
                        if (diskRatio >= 2.0)
                        {
                            rate /= 2;
                        }
                        if (diskRatio >= 4.0)
                        {
                            rate /= 2;
                        }
                        if (diskRatio >= 6.0)
                        {
                            rate = 0.0;
                        }
                        if (VERBOSE)
                        {
                            Console.WriteLine("\ncycle: " + diskFree + " bytes");
                        }
                        testName = "disk full during reader.Dispose() @ " + thisDiskFree + " bytes";
                        dir.RandomIOExceptionRateOnOpen = Random().NextDouble() * 0.01;
                    }
                    else
                    {
                        thisDiskFree = 0;
                        rate = 0.0;
                        if (VERBOSE)
                        {
                            Console.WriteLine("\ncycle: same writer: unlimited disk space");
                        }
                        testName = "reader re-use after disk full";
                        dir.RandomIOExceptionRateOnOpen = 0.0;
                    }

                    dir.MaxSizeInBytes = thisDiskFree;
                    dir.RandomIOExceptionRate = rate;

                    try
                    {
                        if (0 == x)
                        {
                            int docId = 12;
                            for (int i = 0; i < 13; i++)
                            {
                                if (updates)
                                {
                                    Document d = new Document();
                                    d.Add(NewStringField("id", Convert.ToString(i), Field.Store.YES));
                                    d.Add(NewTextField("content", "bbb " + i, Field.Store.NO));
                                    if (DefaultCodecSupportsDocValues())
                                    {
                                        d.Add(new NumericDocValuesField("dv", i));
                                    }
                                    modifier.UpdateDocument(new Term("id", Convert.ToString(docId)), d);
                                }
                                else // deletes
                                {
                                    modifier.DeleteDocuments(new Term("id", Convert.ToString(docId)));
                                    // modifier.setNorm(docId, "contents", (float)2.0);
                                }
                                docId += 12;
                            }
                        }
                        modifier.Dispose();
                        success = true;
                        if (0 == x)
                        {
                            done = true;
                        }
                    }
                    catch (IOException e)
                    {
                        if (VERBOSE)
                        {
                            Console.WriteLine("  hit IOException: " + e);
                            Console.WriteLine(e.StackTrace);
                        }
                        err = e;
                        if (1 == x)
                        {
                            Console.WriteLine(e.ToString());
                            Console.Write(e.StackTrace);
                            Assert.Fail(testName + " hit IOException after disk space was freed up");
                        }
                    }
                    // prevent throwing a random exception here!!
                    double randomIOExceptionRate = dir.RandomIOExceptionRate;
                    long maxSizeInBytes = dir.MaxSizeInBytes;
                    dir.RandomIOExceptionRate = 0.0;
                    dir.RandomIOExceptionRateOnOpen = 0.0;
                    dir.MaxSizeInBytes = 0;
                    if (!success)
                    {
                        // Must force the close else the writer can have
                        // open files which cause exc in MockRAMDir.close
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: now rollback");
                        }
                        modifier.Rollback();
                    }

                    // If the close() succeeded, make sure there are
                    // no unreferenced files.
                    if (success)
                    {
                        TestUtil.CheckIndex(dir);
                        TestIndexWriter.AssertNoUnreferencedFiles(dir, "after writer.close");
                    }
                    dir.RandomIOExceptionRate = randomIOExceptionRate;
                    dir.MaxSizeInBytes = maxSizeInBytes;

                    // Finally, verify index is not corrupt, and, if
                    // we succeeded, we see all docs changed, and if
                    // we failed, we see either all docs or no docs
                    // changed (transactional semantics):
                    IndexReader newReader = null;
                    try
                    {
                        newReader = DirectoryReader.Open(dir);
                    }
                    catch (IOException e)
                    {
                        Console.WriteLine(e.ToString());
                        Console.Write(e.StackTrace);
                        Assert.Fail(testName + ":exception when creating IndexReader after disk full during close: " + e);
                    }

                    IndexSearcher searcher = NewSearcher(newReader);
                    ScoreDoc[] hits = null;
                    try
                    {
                        hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
                    }
                    catch (IOException e)
                    {
                        Console.WriteLine(e.ToString());
                        Console.Write(e.StackTrace);
                        Assert.Fail(testName + ": exception when searching: " + e);
                    }
                    int result2 = hits.Length;
                    if (success)
                    {
                        if (x == 0 && result2 != END_COUNT)
                        {
                            Assert.Fail(testName + ": method did not throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
                        }
                        else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT)
                        {
                            // It's possible that the first exception was
                            // "recoverable" wrt pending deletes, in which
                            // case the pending deletes are retained and
                            // then re-flushing (with plenty of disk
                            // space) will succeed in flushing the
                            // deletes:
                            Assert.Fail(testName + ": method did not throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
                        }
                    }
                    else
                    {
                        // On hitting exception we still may have added
                        // all docs:
                        if (result2 != START_COUNT && result2 != END_COUNT)
                        {
                            Console.WriteLine(err.ToString());
                            Console.Write(err.StackTrace);
                            Assert.Fail(testName + ": method did throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
                        }
                    }
                    newReader.Dispose();
                    if (result2 == END_COUNT)
                    {
                        break;
                    }
                }
                dir.Dispose();
                modifier.Dispose();

                // Try again with 10 more bytes of free space:
                diskFree += 10;
            }
            startDir.Dispose();
        }
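
The fault-injection knobs this loop drives all live on MockDirectoryWrapper; shown in isolation with illustrative values:

    MockDirectoryWrapper dir = NewMockDirectory();
    dir.MaxSizeInBytes = 1024;              // simulate a nearly full disk
    dir.RandomIOExceptionRate = 0.1;        // fail a fraction of I/O calls
    dir.RandomIOExceptionRateOnOpen = 0.01; // occasionally fail file opens
    // ... exercise the writer and expect IOExceptions ...
    dir.MaxSizeInBytes = 0;                 // 0 restores unlimited space
    dir.RandomIOExceptionRate = 0.0;        // stop injecting write failures
    dir.RandomIOExceptionRateOnOpen = 0.0;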
Code Example #32
        private static BaseDirectoryWrapper WrapDirectory(Random random, Directory directory, bool bare)
        {
            if (Rarely(random))
            {
                directory = new NRTCachingDirectory(directory, random.NextDouble(), random.NextDouble());
            }

            if (Rarely(random))
            {
                double maxMBPerSec = 10 + 5 * (random.NextDouble() - 0.5);
                if (LuceneTestCase.VERBOSE)
                {
                    Console.WriteLine("LuceneTestCase: will rate limit output IndexOutput to " + maxMBPerSec + " MB/sec");
                }
                RateLimitedDirectoryWrapper rateLimitedDirectoryWrapper = new RateLimitedDirectoryWrapper(directory);
                switch (random.Next(10))
                {
                    case 3: // sometimes rate limit on flush
                        rateLimitedDirectoryWrapper.SetMaxWriteMBPerSec(maxMBPerSec, IOContext.UsageContext.FLUSH);
                        break;

                    case 2: // sometimes rate limit flush & merge
                        rateLimitedDirectoryWrapper.SetMaxWriteMBPerSec(maxMBPerSec, IOContext.UsageContext.FLUSH);
                        rateLimitedDirectoryWrapper.SetMaxWriteMBPerSec(maxMBPerSec, IOContext.UsageContext.MERGE);
                        break;

                    default:
                        rateLimitedDirectoryWrapper.SetMaxWriteMBPerSec(maxMBPerSec, IOContext.UsageContext.MERGE);
                        break;
                }
                directory = rateLimitedDirectoryWrapper;
            }

            if (bare)
            {
                BaseDirectoryWrapper @base = new BaseDirectoryWrapper(directory);
                // LUCENENET TODO CloseAfterSuite(new IDisposableDirectory(@base, SuiteFailureMarker));
                return @base;
            }
            else
            {
                MockDirectoryWrapper mock = new MockDirectoryWrapper(random, directory);

                mock.Throttling = TEST_THROTTLING;
                // LUCENENET TODO CloseAfterSuite(new IDisposableDirectory(mock, SuiteFailureMarker));
                return mock;
            }
        }
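
A hedged sketch of the two call shapes (the directory argument is illustrative): passing bare yields a plain BaseDirectoryWrapper, while the default path yields a MockDirectoryWrapper with throttling and extra bookkeeping.

    // Checked wrapper: failure injection, throttling, dispose-time checks.
    BaseDirectoryWrapper checkedDir = WrapDirectory(Random(), new RAMDirectory(), false);
    // Bare wrapper: plain pass-through, no mock bookkeeping.
    BaseDirectoryWrapper bareDir = WrapDirectory(Random(), new RAMDirectory(), true);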
Code Example #33
        public virtual void TestDeletes1()
        {
            //IndexWriter.debug2 = System.out;
            Directory dir = new MockDirectoryWrapper(new Random(Random().Next()), new RAMDirectory());
            IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
            iwc.SetMergeScheduler(new SerialMergeScheduler());
            iwc.SetMaxBufferedDocs(5000);
            iwc.SetRAMBufferSizeMB(100);
            RangeMergePolicy fsmp = new RangeMergePolicy(this, false);
            iwc.SetMergePolicy(fsmp);
            IndexWriter writer = new IndexWriter(dir, iwc);
            for (int x = 0; x < 5; x++)
            {
                writer.AddDocument(DocHelper.CreateDocument(x, "1", 2));
                //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
            }
            //System.out.println("commit1");
            writer.Commit();
            Assert.AreEqual(1, writer.SegmentCount);
            for (int x = 5; x < 10; x++)
            {
                writer.AddDocument(DocHelper.CreateDocument(x, "2", 2));
                //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
            }
            //System.out.println("commit2");
            writer.Commit();
            Assert.AreEqual(2, writer.SegmentCount);

            for (int x = 10; x < 15; x++)
            {
                writer.AddDocument(DocHelper.CreateDocument(x, "3", 2));
                //System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
            }

            writer.DeleteDocuments(new Term("id", "1"));

            writer.DeleteDocuments(new Term("id", "11"));

            // flushing without applying deletes means
            // there will still be deletes in the segment infos
            writer.Flush(false, false);
            Assert.IsTrue(writer.BufferedUpdatesStreamAny);

            // get reader flushes pending deletes
            // so there should not be anymore
            IndexReader r1 = writer.Reader;
            Assert.IsFalse(writer.BufferedUpdatesStreamAny);
            r1.Dispose();

            // delete id:2 from the first segment
            // merge segments 0 and 1
            // which should apply the delete id:2
            writer.DeleteDocuments(new Term("id", "2"));
            writer.Flush(false, false);
            fsmp = (RangeMergePolicy)writer.Config.MergePolicy;
            fsmp.DoMerge = true;
            fsmp.Start = 0;
            fsmp.Length = 2;
            writer.MaybeMerge();

            Assert.AreEqual(2, writer.SegmentCount);

            // id:2 shouldn't exist anymore because
            // it's been applied in the merge and now it's gone
            IndexReader r2 = writer.Reader;
            int[] id2docs = ToDocsArray(new Term("id", "2"), null, r2);
            Assert.IsTrue(id2docs == null);
            r2.Dispose();

            /*
            // added docs are in the ram buffer
            for (int x = 15; x < 20; x++) {
                writer.AddDocument(TestIndexWriterReader.CreateDocument(x, "4", 2));
                System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
            }
            Assert.IsTrue(writer.numRamDocs() > 0);
            // delete from the ram buffer
            writer.DeleteDocuments(new Term("id", Integer.toString(13)));

            Term id3 = new Term("id", Integer.toString(3));

            // delete from the 1st segment
            writer.DeleteDocuments(id3);

            Assert.IsTrue(writer.numRamDocs() > 0);

            //System.out
            //    .println("segdels1:" + writer.docWriter.deletesToString());

            //Assert.IsTrue(writer.docWriter.segmentDeletes.Size() > 0);

            // we cause a merge to happen
            fsmp.doMerge = true;
            fsmp.start = 0;
            fsmp.Length = 2;
            System.out.println("maybeMerge "+writer.SegmentInfos);

            SegmentInfo info0 = writer.SegmentInfos.Info(0);
            SegmentInfo info1 = writer.SegmentInfos.Info(1);

            writer.MaybeMerge();
            System.out.println("maybeMerge after "+writer.SegmentInfos);
            // there should be docs in RAM
            Assert.IsTrue(writer.numRamDocs() > 0);

            // assert we've merged the 1 and 2 segments
            // and still have a segment leftover == 2
            Assert.AreEqual(2, writer.SegmentInfos.Size());
            Assert.IsFalse(segThere(info0, writer.SegmentInfos));
            Assert.IsFalse(segThere(info1, writer.SegmentInfos));

            //System.out.println("segdels2:" + writer.docWriter.deletesToString());

            //Assert.IsTrue(writer.docWriter.segmentDeletes.Size() > 0);

            IndexReader r = writer.GetReader();
            IndexReader r1 = r.getSequentialSubReaders()[0];
            printDelDocs(r1.GetLiveDocs());
            int[] docs = toDocsArray(id3, null, r);
            System.out.println("id3 docs:"+Arrays.toString(docs));
            // there shouldn't be any docs for id:3
            Assert.IsTrue(docs == null);
            r.Dispose();

            part2(writer, fsmp);
            */
            // System.out.println("segdels2:"+writer.docWriter.segmentDeletes.toString());
            //System.out.println("close");
            writer.Dispose();
            dir.Dispose();
        }
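
The core idiom of this test, distilled: Flush(false, false) writes segments without applying buffered deletes, and pulling an NRT reader is what forces them through. A minimal sketch relying on the same internal hooks used above; the parameter meanings follow the flush(triggerMerge, applyAllDeletes) signature:

    writer.DeleteDocuments(new Term("id", "1"));
    writer.Flush(false, false);                      // (triggerMerge, applyAllDeletes)
    Assert.IsTrue(writer.BufferedUpdatesStreamAny);  // deletes still buffered
    IndexReader r = writer.Reader;                   // NRT reader applies them
    Assert.IsFalse(writer.BufferedUpdatesStreamAny);
    r.Dispose();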