Code example #1
        public void TestGetFilterHandleNumericParseError()
        {
            NumericRangeFilterBuilder filterBuilder = new NumericRangeFilterBuilder();

            filterBuilder.SetStrictMode(false);

            String      xml    = "<NumericRangeFilter fieldName='AGE' type='int' lowerTerm='-1' upperTerm='NaN'/>";
            XmlDocument doc    = GetDocumentFromString(xml);
            Filter      filter = filterBuilder.GetFilter(doc.DocumentElement);

            Store.Directory ramDir = NewDirectory();
            IndexWriter     writer = new IndexWriter(ramDir, NewIndexWriterConfig(TEST_VERSION_CURRENT, null));

            writer.Commit();
            try
            {
                AtomicReader reader = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(ramDir));
                try
                {
                    assertNull(filter.GetDocIdSet(reader.AtomicContext, reader.LiveDocs));
                }
                finally
                {
                    reader.Dispose();
                }
            }
            finally
            {
                writer.Commit();
                writer.Dispose();
                ramDir.Dispose();
            }
        }
Code example #2
        protected void CheckPerformance(IClassifier<T> classifier, Analyzer analyzer, String classFieldName)
        {
            AtomicReader atomicReader = null;
            long         trainStart   = J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond; // LUCENENET: Use NanoTime() rather than CurrentTimeMilliseconds() for more accurate/reliable results

            try
            {
                PopulatePerformanceIndex(analyzer);
                atomicReader = SlowCompositeReaderWrapper.Wrap(indexWriter.GetReader());
                classifier.Train(atomicReader, textFieldName, classFieldName, analyzer);
                long trainEnd  = J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond; // LUCENENET: Use NanoTime() rather than CurrentTimeMilliseconds() for more accurate/reliable results
                long trainTime = trainEnd - trainStart;
                // LUCENENET: This test is running slow on .NET Framework in CI, so we are giving it a little more time to complete.
#if NETFRAMEWORK
                Assert.IsTrue(trainTime < 150000, "training took more than 2.5 mins : " + trainTime / 1000 + "s");
#else
                Assert.IsTrue(trainTime < 120000, "training took more than 2 mins : " + trainTime / 1000 + "s");
#endif
            }
            finally
            {
                if (atomicReader != null)
                {
                    atomicReader.Dispose();
                }
            }
        }
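A hedged aside on the timing idiom above: J2N.Time.NanoTime() returns a monotonic timestamp in nanoseconds, and dividing by J2N.Time.MillisecondsPerNanosecond converts it to milliseconds, which is why the test computes both endpoints the same way before subtracting. A minimal standalone sketch of the idiom, assuming only that the J2N package is referenced (DoWork is a hypothetical placeholder):

        long startMs = J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond; // monotonic start, in milliseconds
        DoWork();                                                                // hypothetical workload being timed
        long elapsedMs = (J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond) - startMs;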
Code example #3
        public void TestDocsAndPositionsEnumStart()
        {
            Analyzer    analyzer = new MockAnalyzer(Random);
            int         numIters = AtLeast(3);
            MemoryIndex memory   = new MemoryIndex(true, Random.nextInt(50) * 1024 * 1024);

            for (int i = 0; i < numIters; i++)
            { // check reuse
                memory.AddField("foo", "bar", analyzer);
                AtomicReader reader = (AtomicReader)memory.CreateSearcher().IndexReader;
                assertEquals(1, reader.GetTerms("foo").SumTotalTermFreq);
                DocsAndPositionsEnum disi = reader.GetTermPositionsEnum(new Term("foo", "bar"));
                int docid = disi.DocID;
                assertEquals(-1, docid);
                assertTrue(disi.NextDoc() != DocIdSetIterator.NO_MORE_DOCS);
                assertEquals(0, disi.NextPosition());
                assertEquals(0, disi.StartOffset);
                assertEquals(3, disi.EndOffset);

                // now reuse and check again
                TermsEnum te = reader.GetTerms("foo").GetEnumerator();
                assertTrue(te.SeekExact(new BytesRef("bar")));
                disi  = te.DocsAndPositions(null, disi);
                docid = disi.DocID;
                assertEquals(-1, docid);
                assertTrue(disi.NextDoc() != DocIdSetIterator.NO_MORE_DOCS);
                reader.Dispose();
                memory.Reset();
            }
        }
Code example #4
        protected void CheckOnlineClassification(IClassifier<T> classifier, String inputDoc, T expectedResult, Analyzer analyzer, String textFieldName, String classFieldName, Query query)
        {
            AtomicReader atomicReader = null;

            try
            {
                PopulateSampleIndex(analyzer);
                atomicReader = SlowCompositeReaderWrapper.Wrap(indexWriter.GetReader());
                classifier.Train(atomicReader, textFieldName, classFieldName, analyzer, query);
                ClassificationResult<T> classificationResult = classifier.AssignClass(inputDoc);
                Assert.NotNull(classificationResult.AssignedClass);
                Assert.AreEqual(expectedResult, classificationResult.AssignedClass, "got an assigned class of " + classificationResult.AssignedClass);
                Assert.IsTrue(classificationResult.Score > 0, "got a non-positive score " + classificationResult.Score);
                UpdateSampleIndex(analyzer);
                ClassificationResult<T> secondClassificationResult = classifier.AssignClass(inputDoc);
                Assert.AreEqual(classificationResult.AssignedClass, secondClassificationResult.AssignedClass);
                Assert.AreEqual(classificationResult.Score, secondClassificationResult.Score);
            }
            finally
            {
                if (atomicReader != null)
                {
                    atomicReader.Dispose();
                }
            }
        }
Code example #5
File: SorterTestBase.cs Project: zhuthree/lucenenet
        public override void AfterClass() // LUCENENET specific - renamed from AfterClassSorterTestBase() to ensure calling order vs base class
        {
            reader.Dispose();
            dir.Dispose();

            base.AfterClass();
        }
Code example #6
 public override void TearDown()
 {
     _originalIndex.Dispose();
     _indexWriter.Dispose();
     _dir.Dispose();
     base.TearDown();
 }
Code example #7
 public static void AfterClass()
 {
     Reader.Dispose();
     Reader = null;
     Directory.Dispose();
     Directory      = null;
     UnicodeStrings = null;
     MultiValued    = null;
 }
Code example #8
 public override void AfterClass()
 {
     Reader.Dispose();
     Reader = null;
     Directory.Dispose();
     Directory      = null;
     UnicodeStrings = null;
     MultiValued    = null;
     base.AfterClass();
 }
Code example #9
 public override void AfterClass()
 {
     reader.Dispose();
     reader = null;
     directory.Dispose();
     directory      = null;
     unicodeStrings = null;
     multiValued    = null;
     base.AfterClass();
 }
Code example #10
 public override void TearDown()
 {
     ReaderA.Dispose();
     ReaderAclone.Dispose();
     ReaderB.Dispose();
     ReaderX.Dispose();
     DirA.Dispose();
     DirB.Dispose();
     base.TearDown();
 }
Code example #11
        public override void TearDown()
        {
            ReaderA.Dispose();
            ReaderAclone.Dispose();
            ReaderB.Dispose();
            ReaderX.Dispose();
            DirA.Dispose();
            DirB.Dispose();

            // LUCENENET specific. See <see cref="SetUp()"/>. Dispose our InfoStream and set it to null
            // to avoid polluting the state of other tests.
            FieldCache.DEFAULT.InfoStream.Dispose();
            FieldCache.DEFAULT.InfoStream = null;
            base.TearDown();
        }
Code example #12
        public virtual void TestNoOrds()
        {
            Directory         dir = NewDirectory();
            RandomIndexWriter iw  = new RandomIndexWriter(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
                this,
#endif
                Random, dir);
            Document  doc = new Document();
            FieldType ft  = new FieldType(TextField.TYPE_NOT_STORED);

            ft.StoreTermVectors = true;
            doc.Add(new Field("foo", "this is a test", ft));
            iw.AddDocument(doc);
            AtomicReader ir    = GetOnlySegmentReader(iw.GetReader());
            Terms        terms = ir.GetTermVector(0, "foo");

            Assert.IsNotNull(terms);
            TermsEnum termsEnum = terms.GetEnumerator();

            Assert.AreEqual(TermsEnum.SeekStatus.FOUND, termsEnum.SeekCeil(new BytesRef("this")));
            try
            {
                var _ = termsEnum.Ord;
                Assert.Fail();
            }
#pragma warning disable 168
            catch (NotSupportedException expected)
#pragma warning restore 168
            {
                // expected exception
            }

            try
            {
                termsEnum.SeekExact(0);
                Assert.Fail();
            }
#pragma warning disable 168
            catch (NotSupportedException expected)
#pragma warning restore 168
            {
                // expected exception
            }
            ir.Dispose();
            iw.Dispose();
            dir.Dispose();
        }
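A hedged aside: these tests run on NUnit, so the try/Assert.Fail()/catch blocks above could be written more compactly with NUnit's Assert.Throws. A minimal sketch of the equivalent assertions (not the project's actual code; it also drops the #pragma suppressions, since no unused exception variable remains):

            Assert.Throws<NotSupportedException>(() => { var _ = termsEnum.Ord; });
            Assert.Throws<NotSupportedException>(() => termsEnum.SeekExact(0));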
Code example #13
        public virtual void TestChangeGaps()
        {
            // LUCENE-5324: check that it is possible to change the wrapper's gaps
            int      positionGap = Random.Next(1000);
            int      offsetGap   = Random.Next(1000);
            Analyzer @delegate   = new MockAnalyzer(Random);
            Analyzer a           = new AnalyzerWrapperAnonymousClass2(this, @delegate.Strategy, positionGap, offsetGap, @delegate);

            RandomIndexWriter writer = new RandomIndexWriter(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
                this,
#endif
                Random, NewDirectory());
            Document  doc = new Document();
            FieldType ft  = new FieldType();

            ft.IsIndexed                = true;
            ft.IndexOptions             = IndexOptions.DOCS_ONLY;
            ft.IsTokenized              = true;
            ft.StoreTermVectors         = true;
            ft.StoreTermVectorPositions = true;
            ft.StoreTermVectorOffsets   = true;
            doc.Add(new Field("f", "a", ft));
            doc.Add(new Field("f", "a", ft));
            writer.AddDocument(doc, a);
            AtomicReader reader = GetOnlySegmentReader(writer.GetReader());
            Fields       fields = reader.GetTermVectors(0);
            Terms        terms  = fields.GetTerms("f");
            TermsEnum    te     = terms.GetEnumerator();

            Assert.IsTrue(te.MoveNext());
            Assert.AreEqual(new BytesRef("a"), te.Term);
            DocsAndPositionsEnum dpe = te.DocsAndPositions(null, null);

            Assert.AreEqual(0, dpe.NextDoc());
            Assert.AreEqual(2, dpe.Freq);
            Assert.AreEqual(0, dpe.NextPosition());
            Assert.AreEqual(0, dpe.StartOffset);
            int endOffset = dpe.EndOffset;

            Assert.AreEqual(1 + positionGap, dpe.NextPosition());
            Assert.AreEqual(1 + endOffset + offsetGap, dpe.EndOffset);
            Assert.IsFalse(te.MoveNext());
            reader.Dispose();
            writer.Dispose();
            writer.IndexWriter.Directory.Dispose();
        }
Code example #14
        public virtual void TestChangeGaps()
        {
            // LUCENE-5324: check that it is possible to change the wrapper's gaps
            int      positionGap = Random().Next(1000);
            int      offsetGap   = Random().Next(1000);
            Analyzer @delegate   = new MockAnalyzer(Random());
            Analyzer a           = new AnalyzerWrapperAnonymousInnerClassHelper2(this, @delegate.Strategy, positionGap, offsetGap, @delegate);

            RandomIndexWriter writer = new RandomIndexWriter(Random(), NewDirectory());
            Document          doc    = new Document();
            FieldType         ft     = new FieldType();

            ft.Indexed                  = true;
            ft.IndexOptions             = FieldInfo.IndexOptions.DOCS_ONLY;
            ft.Tokenized                = true;
            ft.StoreTermVectors         = true;
            ft.StoreTermVectorPositions = true;
            ft.StoreTermVectorOffsets   = true;
            doc.Add(new Field("f", "a", ft));
            doc.Add(new Field("f", "a", ft));
            writer.AddDocument(doc, a);
            AtomicReader reader = GetOnlySegmentReader(writer.Reader);
            Fields       fields = reader.GetTermVectors(0);
            Terms        terms  = fields.Terms("f");
            TermsEnum    te     = terms.Iterator(null);

            Assert.AreEqual(new BytesRef("a"), te.Next());
            DocsAndPositionsEnum dpe = te.DocsAndPositions(null, null);

            Assert.AreEqual(0, dpe.NextDoc());
            Assert.AreEqual(2, dpe.Freq());
            Assert.AreEqual(0, dpe.NextPosition());
            Assert.AreEqual(0, dpe.StartOffset());
            int endOffset = dpe.EndOffset();

            Assert.AreEqual(1 + positionGap, dpe.NextPosition());
            Assert.AreEqual(1 + endOffset + offsetGap, dpe.EndOffset());
            Assert.AreEqual(null, te.Next());
            reader.Dispose();
            writer.Dispose();
            writer.w.Directory.Dispose();
        }
Code example #15
        public virtual void TestNoOrds()
        {
            Directory         dir = NewDirectory();
            RandomIndexWriter iw  = new RandomIndexWriter(Random(), dir, Similarity, TimeZone);
            Document          doc = new Document();
            FieldType         ft  = new FieldType(TextField.TYPE_NOT_STORED);

            ft.StoreTermVectors = true;
            doc.Add(new Field("foo", "this is a test", ft));
            iw.AddDocument(doc);
            AtomicReader ir    = GetOnlySegmentReader(iw.Reader);
            Terms        terms = ir.GetTermVector(0, "foo");

            Assert.IsNotNull(terms);
            TermsEnum termsEnum = terms.Iterator(null);

            Assert.AreEqual(TermsEnum.SeekStatus.FOUND, termsEnum.SeekCeil(new BytesRef("this")));
            try
            {
                termsEnum.Ord();
                Assert.Fail();
            }
            catch (System.NotSupportedException expected)
            {
                // expected exception
            }

            try
            {
                termsEnum.SeekExact(0);
                Assert.Fail();
            }
            catch (System.NotSupportedException expected)
            {
                // expected exception
            }
            ir.Dispose();
            iw.Dispose();
            dir.Dispose();
        }
Code example #16
        protected void CheckPerformance(IClassifier<T> classifier, Analyzer analyzer, String classFieldName)
        {
            AtomicReader atomicReader = null;
            long         trainStart   = J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond; // LUCENENET: Use NanoTime() rather than CurrentTimeMilliseconds() for more accurate/reliable results

            try
            {
                PopulatePerformanceIndex(analyzer);
                atomicReader = SlowCompositeReaderWrapper.Wrap(indexWriter.GetReader());
                classifier.Train(atomicReader, textFieldName, classFieldName, analyzer);
                long trainEnd  = J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond; // LUCENENET: Use NanoTime() rather than CurrentTimeMilliseconds() for more accurate/reliable results
                long trainTime = trainEnd - trainStart;
                Assert.IsTrue(trainTime < 120000, "training took more than 2 mins : " + trainTime / 1000 + "s");
            }
            finally
            {
                if (atomicReader != null)
                {
                    atomicReader.Dispose();
                }
            }
        }
Code example #17
        protected void CheckPerformance(IClassifier<T> classifier, Analyzer analyzer, String classFieldName)
        {
            AtomicReader atomicReader = null;
            var          stopwatch    = new Stopwatch();

            stopwatch.Start();
            try
            {
                PopulatePerformanceIndex(analyzer);
                atomicReader = SlowCompositeReaderWrapper.Wrap(indexWriter.Reader);
                classifier.Train(atomicReader, textFieldName, classFieldName, analyzer);
                stopwatch.Stop();
                long trainTime = stopwatch.ElapsedMilliseconds;
                IsTrue(trainTime < 120000, "training took more than 2 mins : " + trainTime / 1000 + "s");
            }
            finally
            {
                if (atomicReader != null)
                {
                    atomicReader.Dispose();
                }
            }
        }
Code example #18
        public void TestDocsEnumStart()
        {
            Analyzer    analyzer = new MockAnalyzer(Random);
            MemoryIndex memory   = new MemoryIndex(Random.nextBoolean(), Random.nextInt(50) * 1024 * 1024);

            memory.AddField("foo", "bar", analyzer);
            AtomicReader reader = (AtomicReader)memory.CreateSearcher().IndexReader;
            DocsEnum     disi   = TestUtil.Docs(Random, reader, "foo", new BytesRef("bar"), null, null, DocsFlags.NONE);
            int          docid  = disi.DocID;

            assertEquals(-1, docid);
            assertTrue(disi.NextDoc() != DocIdSetIterator.NO_MORE_DOCS);

            // now reuse and check again
            TermsEnum te = reader.GetTerms("foo").GetEnumerator();

            assertTrue(te.SeekExact(new BytesRef("bar")));
            disi  = te.Docs(null, disi, DocsFlags.NONE);
            docid = disi.DocID;
            assertEquals(-1, docid);
            assertTrue(disi.NextDoc() != DocIdSetIterator.NO_MORE_DOCS);
            reader.Dispose();
        }
Code example #19
        public void TestRandomIndex()
        {
            Directory    dir      = NewDirectory();
            MockAnalyzer analyzer = new MockAnalyzer(Random);

            analyzer.MaxTokenLength = TestUtil.NextInt32(Random, 1, IndexWriter.MAX_TERM_LENGTH);
            RandomIndexWriter w = new RandomIndexWriter(Random, dir, analyzer);

            CreateRandomIndex(AtLeast(50), w, Random.NextInt64());
            DirectoryReader reader        = w.GetReader();
            AtomicReader    wrapper       = SlowCompositeReaderWrapper.Wrap(reader);
            string          field         = @"body";
            Terms           terms         = wrapper.GetTerms(field);
            var             lowFreqQueue  = new PriorityQueueAnonymousClass(5);
            var             highFreqQueue = new PriorityQueueAnonymousClass1(5);

            try
            {
                TermsEnum iterator = terms.GetEnumerator();
                while (iterator.MoveNext())
                {
                    if (highFreqQueue.Count < 5)
                    {
                        highFreqQueue.Add(new TermAndFreq(
                                              BytesRef.DeepCopyOf(iterator.Term), iterator.DocFreq));
                        lowFreqQueue.Add(new TermAndFreq(
                                             BytesRef.DeepCopyOf(iterator.Term), iterator.DocFreq));
                    }
                    else
                    {
                        if (highFreqQueue.Top.freq < iterator.DocFreq)
                        {
                            highFreqQueue.Top.freq = iterator.DocFreq;
                            highFreqQueue.Top.term = BytesRef.DeepCopyOf(iterator.Term);
                            highFreqQueue.UpdateTop();
                        }

                        if (lowFreqQueue.Top.freq > iterator.DocFreq)
                        {
                            lowFreqQueue.Top.freq = iterator.DocFreq;
                            lowFreqQueue.Top.term = BytesRef.DeepCopyOf(iterator.Term);
                            lowFreqQueue.UpdateTop();
                        }
                    }
                }

                int lowFreq  = lowFreqQueue.Top.freq;
                int highFreq = highFreqQueue.Top.freq;
                AssumeTrue(@"unlucky index", highFreq - 1 > lowFreq);
                IList<TermAndFreq> highTerms = QueueToList(highFreqQueue);
                IList<TermAndFreq> lowTerms  = QueueToList(lowFreqQueue);

                IndexSearcher    searcher     = NewSearcher(reader);
                Occur            lowFreqOccur = RandomOccur(Random);
                BooleanQuery     verifyQuery  = new BooleanQuery();
                CommonTermsQuery cq           = new CommonTermsQuery(RandomOccur(Random),
                                                                     lowFreqOccur, highFreq - 1, Random.NextBoolean());
                foreach (TermAndFreq termAndFreq in lowTerms)
                {
                    cq.Add(new Term(field, termAndFreq.term));
                    verifyQuery.Add(new BooleanClause(new TermQuery(new Term(field,
                                                                             termAndFreq.term)), lowFreqOccur));
                }
                foreach (TermAndFreq termAndFreq in highTerms)
                {
                    cq.Add(new Term(field, termAndFreq.term));
                }

                TopDocs cqSearch = searcher.Search(cq, reader.MaxDoc);

                TopDocs verifySearch = searcher.Search(verifyQuery, reader.MaxDoc);
                assertEquals(verifySearch.TotalHits, cqSearch.TotalHits);
                var hits = new JCG.HashSet<int>();
                foreach (ScoreDoc doc in verifySearch.ScoreDocs)
                {
                    hits.Add(doc.Doc);
                }

                foreach (ScoreDoc doc in cqSearch.ScoreDocs)
                {
                    assertTrue(hits.Remove(doc.Doc));
                }

                assertTrue(hits.Count == 0);

                /*
                 *  need to force merge here since QueryUtils adds checks based
                 *  on leaf readers, which have different statistics than the
                 *  top-level reader if we have more than one segment. This could
                 *  result in a different query / results.
                 */
                w.ForceMerge(1);
                DirectoryReader reader2 = w.GetReader();
                QueryUtils.Check(Random, cq, NewSearcher(reader2));
                reader2.Dispose();
            }
            finally
            {
                reader.Dispose();
                wrapper.Dispose();
                w.Dispose();
                dir.Dispose();
            }
        }
Code example #20
        public void TestRandomIndex()
        {
            Directory dir = NewDirectory();
            MockAnalyzer analyzer = new MockAnalyzer(Random);
            analyzer.MaxTokenLength = TestUtil.NextInt32(Random, 1, IndexWriter.MAX_TERM_LENGTH);
            RandomIndexWriter w = new RandomIndexWriter(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
                this,
#endif
                Random, dir, analyzer);
            CreateRandomIndex(AtLeast(50), w, Random.NextInt64());
            DirectoryReader reader = w.GetReader();
            AtomicReader wrapper = SlowCompositeReaderWrapper.Wrap(reader);
            string field = @"body";
            Terms terms = wrapper.GetTerms(field);
            var lowFreqQueue = new AnonymousPriorityQueue(this, 5);
            Util.PriorityQueue<TermAndFreq> highFreqQueue = new AnonymousPriorityQueue1(this, 5);
            try
            {
                TermsEnum iterator = terms.GetIterator(null);
                while (iterator.Next() != null)
                {
                    if (highFreqQueue.Count < 5)
                    {
                        highFreqQueue.Add(new TermAndFreq(BytesRef.DeepCopyOf(iterator.Term), iterator.DocFreq));
                        lowFreqQueue.Add(new TermAndFreq(BytesRef.DeepCopyOf(iterator.Term), iterator.DocFreq));
                    }
                    else
                    {
                        if (highFreqQueue.Top.freq < iterator.DocFreq)
                        {
                            highFreqQueue.Top.freq = iterator.DocFreq;
                            highFreqQueue.Top.term = BytesRef.DeepCopyOf(iterator.Term);
                            highFreqQueue.UpdateTop();
                        }

                        if (lowFreqQueue.Top.freq > iterator.DocFreq)
                        {
                            lowFreqQueue.Top.freq = iterator.DocFreq;
                            lowFreqQueue.Top.term = BytesRef.DeepCopyOf(iterator.Term);
                            lowFreqQueue.UpdateTop();
                        }
                    }
                }

                int lowFreq = lowFreqQueue.Top.freq;
                int highFreq = highFreqQueue.Top.freq;
                AssumeTrue(@"unlucky index", highFreq - 1 > lowFreq);
                List<TermAndFreq> highTerms = QueueToList(highFreqQueue);
                List<TermAndFreq> lowTerms = QueueToList(lowFreqQueue);
                IndexSearcher searcher = NewSearcher(reader);
                Occur lowFreqOccur = RandomOccur(Random);
                BooleanQuery verifyQuery = new BooleanQuery();
                CommonTermsQuery cq = new CommonTermsQuery(RandomOccur(Random), lowFreqOccur, highFreq - 1, Random.NextBoolean());
                foreach (TermAndFreq termAndFreq in lowTerms)
                {
                    cq.Add(new Term(field, termAndFreq.term));
                    verifyQuery.Add(new BooleanClause(new TermQuery(new Term(field, termAndFreq.term)), lowFreqOccur));
                }

                foreach (TermAndFreq termAndFreq in highTerms)
                {
                    cq.Add(new Term(field, termAndFreq.term));
                }

                TopDocs cqSearch = searcher.Search(cq, reader.MaxDoc);
                TopDocs verifySearch = searcher.Search(verifyQuery, reader.MaxDoc);
                assertEquals(verifySearch.TotalHits, cqSearch.TotalHits);
                var hits = new JCG.HashSet<int>();
                foreach (ScoreDoc doc in verifySearch.ScoreDocs)
                {
                    hits.Add(doc.Doc);
                }

                foreach (ScoreDoc doc in cqSearch.ScoreDocs)
                {
                    assertTrue(hits.Remove(doc.Doc));
                }

                assertTrue(hits.Count == 0);
                w.ForceMerge(1);
                DirectoryReader reader2 = w.GetReader();
                QueryUtils.Check(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
                    this,
#endif
                    Random, cq, NewSearcher(reader2));
                reader2.Dispose();
            }
            finally
            {
                reader.Dispose();
                wrapper.Dispose();
                w.Dispose();
                dir.Dispose();
            }
        }
Code example #21
 public static void AfterClassSorterTestBase()
 {
     reader.Dispose();
     dir.Dispose();
 }
Code example #22
 public override void TearDown()
 {
     reader.Dispose();
     directory.Dispose();
     base.TearDown();
 }
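Common to every example above is the same teardown discipline: readers, writers, and directories are disposed in reverse order of acquisition, inside finally blocks or teardown methods, so a failing assertion cannot leak file handles between tests. A minimal sketch of the pattern, assuming a LuceneTestCase-derived fixture where NewDirectory(), NewIndexWriterConfig(...), TEST_VERSION_CURRENT, and Random are available as in the examples above:

        public void DisposePatternSketch()
        {
            Store.Directory dir = NewDirectory();
            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)));
            writer.Commit();
            AtomicReader reader = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir));
            try
            {
                // ... exercise the reader under test ...
            }
            finally
            {
                // Dispose in reverse order of acquisition: reader, then writer, then directory.
                reader.Dispose();
                writer.Dispose();
                dir.Dispose();
            }
        }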