public virtual void TestClone()
{
    IndexWriterConfig original = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    IndexWriterConfig copy = (IndexWriterConfig)original.Clone();

    // Parameters that can't be shared across writers must have been deep-cloned:
    // for each one, the clone has the same concrete type, and is either a distinct
    // instance or produces equal clones itself.
    IndexDeletionPolicy originalDelPolicy = original.DelPolicy;
    IndexDeletionPolicy copiedDelPolicy = copy.DelPolicy;
    Assert.IsTrue(originalDelPolicy.GetType() == copiedDelPolicy.GetType()
        && (originalDelPolicy != copiedDelPolicy || originalDelPolicy.Clone() == copiedDelPolicy.Clone()));

    FlushPolicy originalFlushPolicy = original.FlushPolicy;
    FlushPolicy copiedFlushPolicy = copy.FlushPolicy;
    Assert.IsTrue(originalFlushPolicy.GetType() == copiedFlushPolicy.GetType()
        && (originalFlushPolicy != copiedFlushPolicy || originalFlushPolicy.Clone() == copiedFlushPolicy.Clone()));

    DocumentsWriterPerThreadPool originalPool = original.IndexerThreadPool;
    DocumentsWriterPerThreadPool copiedPool = copy.IndexerThreadPool;
    Assert.IsTrue(originalPool.GetType() == copiedPool.GetType()
        && (originalPool != copiedPool || originalPool.Clone() == copiedPool.Clone()));

    MergePolicy originalMergePolicy = original.MergePolicy;
    MergePolicy copiedMergePolicy = copy.MergePolicy;
    Assert.IsTrue(originalMergePolicy.GetType() == copiedMergePolicy.GetType()
        && (originalMergePolicy != copiedMergePolicy || originalMergePolicy.Clone() == copiedMergePolicy.Clone()));

    IMergeScheduler originalScheduler = original.MergeScheduler;
    IMergeScheduler copiedScheduler = copy.MergeScheduler;
    Assert.IsTrue(originalScheduler.GetType() == copiedScheduler.GetType()
        && (originalScheduler != copiedScheduler || originalScheduler.Clone() == copiedScheduler.Clone()));

    // Mutating the original after cloning must not leak into the clone.
    original.SetMergeScheduler(new SerialMergeScheduler());
    Assert.AreEqual(typeof(ConcurrentMergeScheduler), copy.MergeScheduler.GetType());
}
public override MergeSpecification FindForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount, IDictionary<SegmentCommitInfo, bool> segmentsToMerge)
{
    // Collect the segments that were both requested by the caller and flagged
    // by ShouldUpgradeSegment as needing an upgrade.
    IDictionary<SegmentCommitInfo, bool> upgradeCandidates = new Dictionary<SegmentCommitInfo, bool>();
    foreach (SegmentCommitInfo info in segmentInfos.Segments)
    {
        if (segmentsToMerge.TryGetValue(info, out bool requested) && ShouldUpgradeSegment(info))
        {
            upgradeCandidates[info] = requested;
        }
    }

    if (Verbose())
    {
        Message("findForcedMerges: segmentsToUpgrade=" + upgradeCandidates);
    }

    // Nothing to upgrade: no merges to schedule.
    if (upgradeCandidates.Count == 0)
    {
        return null;
    }

    // Let the wrapped policy decide how to merge the upgrade candidates.
    MergeSpecification spec = m_base.FindForcedMerges(segmentInfos, maxSegmentCount, upgradeCandidates);

    if (spec != null)
    {
        // Drop every candidate the wrapped policy already scheduled; whatever
        // survives is left over and must be merged into one extra segment.
        foreach (OneMerge merge in spec.Merges)
        {
            foreach (SegmentCommitInfo scheduled in merge.Segments)
            {
                upgradeCandidates.Remove(scheduled);
            }
        }
    }

    if (upgradeCandidates.Count > 0)
    {
        if (Verbose())
        {
            Message("findForcedMerges: " + m_base.GetType().Name + " does not want to merge all old segments, merge remaining ones into new segment: " + upgradeCandidates);
        }

        // Preserve the original segment order by walking segmentInfos rather
        // than the dictionary.
        IList<SegmentCommitInfo> leftovers = new JCG.List<SegmentCommitInfo>();
        foreach (SegmentCommitInfo info in segmentInfos.Segments)
        {
            if (upgradeCandidates.ContainsKey(info))
            {
                leftovers.Add(info);
            }
        }

        // Append the final catch-all merge for the leftovers.
        if (spec is null)
        {
            spec = new MergeSpecification();
        }
        spec.Add(new OneMerge(leftovers));
    }

    return spec;
}