Clone() Public Method

public object Clone()
Returns: object
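An IndexWriterConfig cannot be reused once a writer has taken ownership of it, so callers typically pass a fresh clone to each IndexWriter, as the examples below do. A minimal usage sketch follows; it assumes Lucene.Net 4.8 namespaces, and StandardAnalyzer, RAMDirectory, and LuceneVersion.LUCENE_48 are illustrative stand-ins rather than anything required by Clone() itself.

// Minimal sketch (assumption: Lucene.Net 4.8 APIs; analyzer and directory are stand-ins).
// Clone() returns object, so the result is cast back to IndexWriterConfig.
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Lucene.Net.Util;

var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);
var conf = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer);

using (var dir = new RAMDirectory())
{
    // Each writer receives its own clone; a config already used by one writer
    // cannot be handed to another.
    using (var w1 = new IndexWriter(dir, (IndexWriterConfig)conf.Clone())) { }
    using (var w2 = new IndexWriter(dir, (IndexWriterConfig)conf.Clone())) { }
}

Example #1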
        public virtual void TestClone()
        {
            IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
            IndexWriterConfig clone = (IndexWriterConfig)conf.Clone();

            // Make sure parameters that can't be reused are cloned
            IndexDeletionPolicy delPolicy = conf.DelPolicy;
            IndexDeletionPolicy delPolicyClone = clone.DelPolicy;
            Assert.IsTrue(delPolicy.GetType() == delPolicyClone.GetType() && (delPolicy != delPolicyClone || delPolicy.Clone() == delPolicyClone.Clone()));

            FlushPolicy flushPolicy = conf.FlushPolicy;
            FlushPolicy flushPolicyClone = clone.FlushPolicy;
            Assert.IsTrue(flushPolicy.GetType() == flushPolicyClone.GetType() && (flushPolicy != flushPolicyClone || flushPolicy.Clone() == flushPolicyClone.Clone()));

            DocumentsWriterPerThreadPool pool = conf.IndexerThreadPool;
            DocumentsWriterPerThreadPool poolClone = clone.IndexerThreadPool;
            Assert.IsTrue(pool.GetType() == poolClone.GetType() && (pool != poolClone || pool.Clone() == poolClone.Clone()));

            MergePolicy mergePolicy = conf.MergePolicy;
            MergePolicy mergePolicyClone = clone.MergePolicy;
            Assert.IsTrue(mergePolicy.GetType() == mergePolicyClone.GetType() && (mergePolicy != mergePolicyClone || mergePolicy.Clone() == mergePolicyClone.Clone()));

            MergeScheduler mergeSched = conf.MergeScheduler;
            MergeScheduler mergeSchedClone = clone.MergeScheduler;
            Assert.IsTrue(mergeSched.GetType() == mergeSchedClone.GetType() && (mergeSched != mergeSchedClone || mergeSched.Clone() == mergeSchedClone.Clone()));

            conf.SetMergeScheduler(new SerialMergeScheduler());
            Assert.AreEqual(typeof(ConcurrentMergeScheduler), clone.MergeScheduler.GetType());
        }
Example #2
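        // Reopening with OpenMode.CREATE through a fresh clone of the config wipes the
        // old index, so the "dv" field can switch from numeric to sorted doc values.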
        public virtual void TestTypeChangeAfterOpenCreate()
        {
            Directory         dir    = NewDirectory();
            IndexWriterConfig conf   = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random));
            IndexWriter       writer = new IndexWriter(dir, (IndexWriterConfig)conf.Clone());
            Document          doc    = new Document();

            doc.Add(new NumericDocValuesField("dv", 0L));
            writer.AddDocument(doc);
            writer.Dispose();
            conf.SetOpenMode(OpenMode.CREATE);
            writer = new IndexWriter(dir, (IndexWriterConfig)conf.Clone());
            doc    = new Document();
            doc.Add(new SortedDocValuesField("dv", new BytesRef("foo")));
            writer.AddDocument(doc);
            writer.Dispose();
            dir.Dispose();
        }
Example #3
        public virtual void TestReuse()
        {
            Directory dir = NewDirectory();
            // test that IWC cannot be reused across two IWs
            IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, null);

            (new RandomIndexWriter(Random(), dir, conf)).Dispose();

            // this should fail
            try
            {
                Assert.IsNotNull(new RandomIndexWriter(Random(), dir, conf));
                Assert.Fail("should have hit AlreadySetException");
            }
#pragma warning disable 168
            catch (AlreadySetException e)
#pragma warning restore 168
            {
                // expected
            }

            // also cloning it won't help, after it has been used already
            try
            {
                Assert.IsNotNull(new RandomIndexWriter(Random(), dir, (IndexWriterConfig)conf.Clone()));
                Assert.Fail("should have hit AlreadySetException");
            }
#pragma warning disable 168
            catch (AlreadySetException e)
#pragma warning restore 168
            {
                // expected
            }

            // if it's cloned in advance, it should be ok
            conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, null);
            (new RandomIndexWriter(Random(), dir, (IndexWriterConfig)conf.Clone())).Dispose();
            (new RandomIndexWriter(Random(), dir, (IndexWriterConfig)conf.Clone())).Dispose();

            dir.Dispose();
        }
Example #4
        /// <summary>
        /// Perform the upgrade. </summary>
        public void Upgrade()
        {
            if (!DirectoryReader.IndexExists(dir))
            {
                throw new IndexNotFoundException(dir.ToString());
            }

            if (!deletePriorCommits)
            {
                ICollection<IndexCommit> commits = DirectoryReader.ListCommits(dir);
                if (commits.Count > 1)
                {
                    throw new System.ArgumentException("this tool was invoked to not delete prior commit points, but the following commits were found: " + commits);
                }
            }

            IndexWriterConfig c = (IndexWriterConfig)iwc.Clone();

            c.MergePolicy         = new UpgradeIndexMergePolicy(c.MergePolicy);
            c.IndexDeletionPolicy = new KeepOnlyLastCommitDeletionPolicy();

            IndexWriter w = new IndexWriter(dir, c);

            try
            {
                InfoStream infoStream = c.InfoStream;
                if (infoStream.IsEnabled("IndexUpgrader"))
                {
                    infoStream.Message("IndexUpgrader", "Upgrading all pre-" + Constants.LUCENE_MAIN_VERSION + " segments of index directory '" + dir + "' to version " + Constants.LUCENE_MAIN_VERSION + "...");
                }
                w.ForceMerge(1);
                if (infoStream.IsEnabled("IndexUpgrader"))
                {
                    infoStream.Message("IndexUpgrader", "All segments upgraded to version " + Constants.LUCENE_MAIN_VERSION);
                }
            }
            finally
            {
                w.Dispose();
            }
        }
Example #5
        public virtual void TestClone()
        {
            IndexWriterConfig conf  = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
            IndexWriterConfig clone = (IndexWriterConfig)conf.Clone();

            // Make sure parameters that can't be reused are cloned
            IndexDeletionPolicy delPolicy      = conf.IndexDeletionPolicy;
            IndexDeletionPolicy delPolicyClone = clone.IndexDeletionPolicy;

            Assert.IsTrue(delPolicy.GetType() == delPolicyClone.GetType() && (delPolicy != delPolicyClone || delPolicy.Clone() == delPolicyClone.Clone()));

            FlushPolicy flushPolicy      = conf.FlushPolicy;
            FlushPolicy flushPolicyClone = clone.FlushPolicy;

            Assert.IsTrue(flushPolicy.GetType() == flushPolicyClone.GetType() && (flushPolicy != flushPolicyClone || flushPolicy.Clone() == flushPolicyClone.Clone()));

            DocumentsWriterPerThreadPool pool      = conf.IndexerThreadPool;
            DocumentsWriterPerThreadPool poolClone = clone.IndexerThreadPool;

            Assert.IsTrue(pool.GetType() == poolClone.GetType() && (pool != poolClone || pool.Clone() == poolClone.Clone()));

            MergePolicy mergePolicy      = conf.MergePolicy;
            MergePolicy mergePolicyClone = clone.MergePolicy;

            Assert.IsTrue(mergePolicy.GetType() == mergePolicyClone.GetType() && (mergePolicy != mergePolicyClone || mergePolicy.Clone() == mergePolicyClone.Clone()));

            IMergeScheduler mergeSched      = conf.MergeScheduler;
            IMergeScheduler mergeSchedClone = clone.MergeScheduler;

            Assert.IsTrue(mergeSched.GetType() == mergeSchedClone.GetType() && (mergeSched != mergeSchedClone || mergeSched.Clone() == mergeSchedClone.Clone()));

            conf.SetMergeScheduler(new SerialMergeScheduler());
#if !FEATURE_CONCURRENTMERGESCHEDULER
            Assert.AreEqual(typeof(TaskMergeScheduler), clone.MergeScheduler.GetType());
#else
            Assert.AreEqual(typeof(ConcurrentMergeScheduler), clone.MergeScheduler.GetType());
#endif
        }
Example #6
        // [Test] // LUCENENET NOTE: For now, we are overriding this test in every subclass to pull it into the right context for the subclass
        public virtual void TestWriteReadMerge()
        {
            // get another codec, other than the default: so we are merging segments across different codecs
            Codec otherCodec;

            if ("SimpleText".Equals(Codec.Default.Name, StringComparison.Ordinal))
            {
                otherCodec = new Lucene46Codec();
            }
            else
            {
                otherCodec = new SimpleTextCodec();
            }
            Directory         dir    = NewDirectory();
            IndexWriterConfig iwConf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));

            iwConf.SetMaxBufferedDocs(RandomInts.NextIntBetween(Random(), 2, 30));
            RandomIndexWriter iw = new RandomIndexWriter(Random(), dir, (IndexWriterConfig)iwConf.Clone());

            int docCount = AtLeast(200);
            var data     = new byte[docCount][][];

            for (int i = 0; i < docCount; ++i)
            {
                int fieldCount = Rarely() ? RandomInts.NextIntBetween(Random(), 1, 500) : RandomInts.NextIntBetween(Random(), 1, 5);
                data[i] = new byte[fieldCount][];
                for (int j = 0; j < fieldCount; ++j)
                {
                    int length = Rarely() ? Random().Next(1000) : Random().Next(10);
                    int max    = Rarely() ? 256 : 2;
                    data[i][j] = RandomByteArray(length, max);
                }
            }

            FieldType type = new FieldType(StringField.TYPE_STORED);

            type.IsIndexed = false;
            type.Freeze();
            Int32Field id = new Int32Field("id", 0, Field.Store.YES);

            for (int i = 0; i < data.Length; ++i)
            {
                Document doc = new Document();
                doc.Add(id);
                id.SetInt32Value(i);
                for (int j = 0; j < data[i].Length; ++j)
                {
                    Field f = new Field("bytes" + j, data[i][j], type);
                    doc.Add(f);
                }
                iw.w.AddDocument(doc);
                if (Random().NextBoolean() && (i % (data.Length / 10) == 0))
                {
                    iw.w.Dispose();
                    // test merging against a non-compressing codec
                    if (iwConf.Codec == otherCodec)
                    {
                        iwConf.SetCodec(Codec.Default);
                    }
                    else
                    {
                        iwConf.SetCodec(otherCodec);
                    }
                    iw = new RandomIndexWriter(Random(), dir, (IndexWriterConfig)iwConf.Clone());
                }
            }

            for (int i = 0; i < 10; ++i)
            {
                int min = Random().Next(data.Length);
                int max = min + Random().Next(20);
                iw.DeleteDocuments(NumericRangeQuery.NewInt32Range("id", min, max, true, false));
            }

            iw.ForceMerge(2); // force merges with deletions

            iw.Commit();

            DirectoryReader ir = DirectoryReader.Open(dir);

            Assert.IsTrue(ir.NumDocs > 0);
            int numDocs = 0;

            for (int i = 0; i < ir.MaxDoc; ++i)
            {
                Document doc = ir.Document(i);
                if (doc == null)
                {
                    continue;
                }
                ++numDocs;
                int docId = (int)doc.GetField("id").GetNumericValue();
                Assert.AreEqual(data[docId].Length + 1, doc.Fields.Count);
                for (int j = 0; j < data[docId].Length; ++j)
                {
                    var      arr     = data[docId][j];
                    BytesRef arr2Ref = doc.GetBinaryValue("bytes" + j);
                    var      arr2    = Arrays.CopyOfRange(arr2Ref.Bytes, arr2Ref.Offset, arr2Ref.Offset + arr2Ref.Length);
                    Assert.AreEqual(arr, arr2);
                }
            }
            Assert.IsTrue(ir.NumDocs <= numDocs);
            ir.Dispose();

            iw.DeleteAll();
            iw.Commit();
            iw.ForceMerge(1);

            iw.Dispose();
            dir.Dispose();
        }
Example #7
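        // CheckIndex should report "has deletions" for the segment with a deleted document,
        // and should stop reporting it after ForceMerge(1) runs via a clone of the same config.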
        public virtual void TestDeletesCheckIndexOutput()
        {
            Directory dir = NewDirectory();
            IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
            iwc.SetMaxBufferedDocs(2);
            IndexWriter w = new IndexWriter(dir, (IndexWriterConfig)iwc.Clone());
            Document doc = new Document();
            doc.Add(NewField("field", "0", StringField.TYPE_NOT_STORED));
            w.AddDocument(doc);

            doc = new Document();
            doc.Add(NewField("field", "1", StringField.TYPE_NOT_STORED));
            w.AddDocument(doc);
            w.Commit();
            Assert.AreEqual(1, w.SegmentCount);

            w.DeleteDocuments(new Term("field", "0"));
            w.Commit();
            Assert.AreEqual(1, w.SegmentCount);
            w.Dispose();

            ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
            //MemoryStream bos = new MemoryStream(1024);
            CheckIndex checker = new CheckIndex(dir);
            checker.InfoStream = new StreamWriter(bos, Encoding.UTF8);
            CheckIndex.Status indexStatus = checker.DoCheckIndex(null);
            Assert.IsTrue(indexStatus.Clean);
            checker.FlushInfoStream();
            string s = bos.ToString();

            // Segment should have deletions:
            Assert.IsTrue(s.Contains("has deletions"), "string was: " + s);
            w = new IndexWriter(dir, (IndexWriterConfig)iwc.Clone());
            w.ForceMerge(1);
            w.Dispose();

            bos = new ByteArrayOutputStream(1024);
            checker.InfoStream = new StreamWriter(bos, Encoding.UTF8);
            indexStatus = checker.DoCheckIndex(null);
            Assert.IsTrue(indexStatus.Clean);
            checker.FlushInfoStream();
            s = bos.ToString();
            Assert.IsFalse(s.Contains("has deletions"));
            dir.Dispose();
        }