/// <summary>
/// Drains the writer's pending-merge queue and runs each merge inline,
/// serialized on <c>this</c> via <c>UninterruptableMonitor</c>. When asserts
/// are enabled, verifies every segment chosen for a merge holds fewer than
/// 20 documents before the merge runs.
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        for (MergePolicy.OneMerge pending = writer.NextMerge(); pending != null; pending = writer.NextMerge())
        {
            if (Debugging.AssertsEnabled)
            {
                foreach (SegmentCommitInfo segment in pending.Segments)
                {
                    Debugging.Assert(segment.Info.DocCount < 20);
                }
            }
            writer.Merge(pending);
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
/// <summary>
/// Randomized test merge policy: gathers segments not already being merged
/// and, sometimes (always when more than 30 are eligible, otherwise with
/// 1-in-5 probability), shuffles them and proposes a single merge over a
/// random-length prefix. Returns <c>null</c> when no merge is proposed.
/// </summary>
public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
{
    MergeSpecification mergeSpec = null;
    //System.out.println("MRMP: findMerges sis=" + segmentInfos);
    int numSegments /* = segmentInfos.Count*/; // LUCENENET: IDE0059: Remove unnecessary value assignment
    JCG.List<SegmentCommitInfo> segments = new JCG.List<SegmentCommitInfo>();
    ICollection<SegmentCommitInfo> merging = base.m_writer.Get().MergingSegments;
    foreach (SegmentCommitInfo sipc in segmentInfos.Segments)
    {
        // Only segments not currently involved in a running merge are eligible.
        if (!merging.Contains(sipc))
        {
            segments.Add(sipc);
        }
    }
    numSegments = segments.Count;
    if (numSegments > 1 && (numSegments > 30 || random.Next(5) == 3))
    {
        segments.Shuffle(random);
        // TODO: sometimes make more than 1 merge?
        mergeSpec = new MergeSpecification();
        int segsToMerge = TestUtil.NextInt32(random, 1, numSegments);
        mergeSpec.Add(new OneMerge(segments.GetView(0, segsToMerge))); // LUCENENET: Checked length for correctness
    }
    return (mergeSpec);
}
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in C#:
//ORIGINAL LINE: public void merge(IndexWriter writer, MergeTrigger trigger, boolean newMergesFound) throws java.io.IOException
/// <summary>
/// Drains the writer's pending-merge queue, submitting each merge as a task
/// to a shared pooled thread pool instead of running it inline.
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    while (true)
    {
        MergePolicy.OneMerge merge = writer.NextMerge;
        if (merge == null)
        {
            return; // queue drained
        }
        bool success = false;
        try
        {
            // NOTE(review): lower-case member names below look like unconverted
            // Java (getMergeThread, mergeFinish, increment/decrement) — confirm
            // these members exist in this file's enclosing class.
            MergeThread mergeThread = getMergeThread(writer, merge);
            // Bump the outstanding-task counter before submitting; undone below
            // if submission fails (presumably also decremented when the task completes).
            _writerTaskCounter.increment();
            PooledConcurrentMergePool.MergeThreadsPool.submit(MergeTask(mergeThread));
            success = true;
        }
        finally
        {
            if (!success)
            {
                // Submission failed: hand the merge back to the writer and undo the counter bump.
                writer.mergeFinish(merge);
                _writerTaskCounter.decrement();
            }
        }
    }
}
/// <summary>
/// Randomized test merge policy (older variant): gathers segments not already
/// being merged and, sometimes (always when more than 30 are eligible,
/// otherwise with 1-in-5 probability), shuffles them and proposes a single
/// merge over a random-length prefix. Returns <c>null</c> when no merge is proposed.
/// </summary>
public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
{
    MergeSpecification mergeSpec = null;
    //System.out.println("MRMP: findMerges sis=" + segmentInfos);
    int numSegments = segmentInfos.Count;
    IList<SegmentCommitInfo> segments = new List<SegmentCommitInfo>();
    ICollection<SegmentCommitInfo> merging = base.m_writer.Get().MergingSegments;
    foreach (SegmentCommitInfo sipc in segmentInfos.Segments)
    {
        // Only segments not currently involved in a running merge are eligible.
        if (!merging.Contains(sipc))
        {
            segments.Add(sipc);
        }
    }
    numSegments = segments.Count;
    if (numSegments > 1 && (numSegments > 30 || Random.Next(5) == 3))
    {
        Collections.Shuffle(segments);
        // TODO: sometimes make more than 1 merge?
        mergeSpec = new MergeSpecification();
        int segsToMerge = TestUtil.NextInt(Random, 1, numSegments);
        mergeSpec.Add(new OneMerge(segments.SubList(0, segsToMerge)));
    }
    return (mergeSpec);
}
/// <summary>
/// Delegates to the base scheduler, but first asserts that no merge is
/// pending whenever merging is currently disallowed (<c>mayMerge</c> is false).
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    lock (this)
    {
        // Short-circuit: NextMerge() (which dequeues a pending merge) is only
        // consulted when merging is disallowed — in which case we fail anyway.
        if (!mayMerge.Get() && writer.NextMerge() != null)
        {
            // Improvement: the original threw a parameterless exception, which
            // makes test failures undiagnosable; include a descriptive message.
            throw new InvalidOperationException("No merges should be pending while mayMerge is false");
        }
        base.Merge(writer, trigger, newMergesFound);
    }
}
/// <summary>
/// Delegates to the base scheduler, but first asserts that no merge is
/// pending whenever merging is currently disallowed (<c>mayMerge</c> is false).
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    lock (this)
    {
        bool mergingDisallowed = !mayMerge.Value;
        if (mergingDisallowed && writer.NextMerge() != null)
        {
            throw AssertionError.Create();
        }
        base.Merge(writer, trigger, newMergesFound);
    }
}
/// <summary>
/// Runs every pending merge synchronously on the calling thread, optionally
/// logging each one when <c>VERBOSE</c> is set.
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    while (true)
    {
        MergePolicy.OneMerge next = writer.NextMerge();
        if (next is null)
        {
            break;
        }
        if (VERBOSE)
        {
            Console.WriteLine("executing merge " + next.SegString(writer.Directory));
        }
        writer.Merge(next);
    }
}
/// <summary>
/// One-shot forced merge: while <c>DoMerge</c> is set, proposes a single merge
/// over segments [Start, Start + Length) and clears the flag; afterwards
/// (and otherwise) returns <c>null</c>.
/// </summary>
public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
{
    if (!DoMerge)
    {
        return null;
    }
    // Build the single requested merge and disarm the one-shot flag.
    MergeSpecification spec = new MergeSpecification();
    spec.Add(new OneMerge(segmentInfos.AsList().SubList(Start, Start + Length)));
    DoMerge = false;
    return spec;
}
/// <summary>
/// One-shot forced merge: when <c>doMerge</c> is set, proposes a single merge
/// over segments [start, start + length) and clears the flag; otherwise
/// returns <c>null</c>.
/// </summary>
public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
{
    // NOTE: this MergeSpecification is allocated unconditionally and discarded
    // on the null path below.
    MergeSpecification ms = new MergeSpecification();
    if (doMerge)
    {
        OneMerge om = new OneMerge(segmentInfos.AsList().GetView(start, length)); // LUCENENET: Converted end index to length
        ms.Add(om);
        doMerge = false;
        return (ms);
    }
    return (null);
}
/// <summary>
/// Just do the merges in sequence. We do this
/// "synchronized" so that even if the application is using
/// multiple threads, only one merge may run at a time.
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    lock (this)
    {
        while (true)
        {
            // NOTE: in this variant NextMerge is a property; it still dequeues
            // the next pending merge from the writer.
            MergePolicy.OneMerge merge = writer.NextMerge;
            if (merge == null)
            {
                break; // queue drained
            }
            writer.Merge(merge);
        }
    }
}
/// <summary>
/// Just do the merges in sequence. We do this
/// "synchronized" so that even if the application is using
/// multiple threads, only one merge may run at a time.
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound) // LUCENENET NOTE: This was internal in the original, but the base class is public so there isn't much choice here
{
    lock (this)
    {
        // Pull merges off the writer's queue one at a time until it is empty.
        for (MergePolicy.OneMerge pending = writer.NextMerge(); pending != null; pending = writer.NextMerge())
        {
            writer.Merge(pending);
        }
    }
}
/// <summary>
/// Delegates to the base scheduler, but first asserts that no merge is
/// pending whenever merging is currently disallowed (<c>mayMerge</c> is false).
/// Serialized on <c>this</c> via <c>UninterruptableMonitor</c>.
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        // Short-circuit: NextMerge() is only consulted when merging is
        // disallowed — in which case we fail anyway.
        if (!mayMerge.Value && writer.NextMerge() != null)
        {
            throw AssertionError.Create();
        }
        base.Merge(writer, trigger, newMergesFound);
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
/// <summary>
/// Runs every pending merge synchronously, serialized on <c>this</c>,
/// asserting that each segment chosen for a merge holds fewer than 20 documents.
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    lock (this)
    {
        for (MergePolicy.OneMerge pending = writer.NextMerge(); pending != null; pending = writer.NextMerge())
        {
            foreach (SegmentCommitInfo segment in pending.Segments)
            {
                Debug.Assert(segment.Info.DocCount < 20);
            }
            writer.Merge(pending);
        }
    }
}
/// <summary>
/// Runs every pending merge synchronously on the calling thread, serialized
/// on <c>this</c> via <c>UninterruptableMonitor</c>.
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound) // LUCENENET NOTE: This was internal in the original, but the base class is public so there isn't much choice here
{
    UninterruptableMonitor.Enter(this);
    try
    {
        while (true)
        {
            MergePolicy.OneMerge merge = writer.NextMerge();
            if (merge is null)
            {
                break; // queue drained
            }
            writer.Merge(merge);
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
/// <summary>
/// Checks if any merges are now necessary and returns a
/// <see cref="MergePolicy.MergeSpecification"/> if so. A merge
/// is necessary when there are more than
/// <see cref="MergeFactor"/> segments at a given level. When
/// multiple levels have too many segments, this method
/// will return multiple merges, allowing the
/// <see cref="MergeScheduler"/> to use concurrency.
/// </summary>
public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos infos)
{
    int numSegments = infos.Count;
    if (IsVerbose)
    {
        Message("findMerges: " + numSegments + " segments");
    }
    // Compute levels, which is just log (base mergeFactor)
    // of the size of each segment
    IList<SegmentInfoAndLevel> levels = new List<SegmentInfoAndLevel>();
    var norm = (float)Math.Log(m_mergeFactor);
    ICollection<SegmentCommitInfo> mergingSegments = m_writer.Get().MergingSegments;
    for (int i = 0; i < numSegments; i++)
    {
        SegmentCommitInfo info = infos.Info(i);
        long size = Size(info);
        // Floor tiny segments
        if (size < 1)
        {
            size = 1;
        }
        SegmentInfoAndLevel infoLevel = new SegmentInfoAndLevel(info, (float)Math.Log(size) / norm, i);
        levels.Add(infoLevel);
        if (IsVerbose)
        {
            long segBytes = SizeBytes(info);
            string extra = mergingSegments.Contains(info) ? " [merging]" : "";
            if (size >= m_maxMergeSize)
            {
                extra += " [skip: too large]";
            }
            Message("seg=" + m_writer.Get().SegString(info) + " level=" + infoLevel.level + " size=" + String.Format(CultureInfo.InvariantCulture, "{0:0.00} MB", segBytes / 1024 / 1024.0) + extra);
        }
    }
    // The floor level: segments below m_minMergeSize all collapse into the
    // lowest level so tiny segments still get merged together.
    float levelFloor;
    if (m_minMergeSize <= 0)
    {
        levelFloor = (float)0.0;
    }
    else
    {
        levelFloor = (float)(Math.Log(m_minMergeSize) / norm);
    }
    // Now, we quantize the log values into levels. The
    // first level is any segment whose log size is within
    // LEVEL_LOG_SPAN of the max size, or, who has such as
    // segment "to the right". Then, we find the max of all
    // other segments and use that to define the next level
    // segment, etc.
    MergeSpecification spec = null;
    int numMergeableSegments = levels.Count;
    int start = 0;
    while (start < numMergeableSegments)
    {
        // Find max level of all segments not already
        // quantized.
        float maxLevel = levels[start].level;
        for (int i = 1 + start; i < numMergeableSegments; i++)
        {
            float level = levels[i].level;
            if (level > maxLevel)
            {
                maxLevel = level;
            }
        }
        // Now search backwards for the rightmost segment that
        // falls into this level:
        float levelBottom;
        if (maxLevel <= levelFloor)
        {
            // All remaining segments fall into the min level
            levelBottom = -1.0F;
        }
        else
        {
            levelBottom = (float)(maxLevel - LEVEL_LOG_SPAN);
            // Force a boundary at the level floor
            if (levelBottom < levelFloor && maxLevel >= levelFloor)
            {
                levelBottom = levelFloor;
            }
        }
        int upto = numMergeableSegments - 1;
        while (upto >= start)
        {
            if (levels[upto].level >= levelBottom)
            {
                break;
            }
            upto--;
        }
        if (IsVerbose)
        {
            Message(" level " + levelBottom.ToString("0.0") + " to " + maxLevel.ToString("0.0") + ": " + (1 + upto - start) + " segments");
        }
        // Finally, record all merges that are viable at this level:
        int end = start + m_mergeFactor;
        while (end <= 1 + upto)
        {
            bool anyTooLarge = false;
            bool anyMerging = false;
            for (int i = start; i < end; i++)
            {
                SegmentCommitInfo info = levels[i].info;
                anyTooLarge |= (Size(info) >= m_maxMergeSize || SizeDocs(info) >= m_maxMergeDocs);
                if (mergingSegments.Contains(info))
                {
                    anyMerging = true;
                    break;
                }
            }
            if (anyMerging)
            {
                // skip
            }
            else if (!anyTooLarge)
            {
                if (spec == null)
                {
                    spec = new MergeSpecification();
                }
                IList<SegmentCommitInfo> mergeInfos = new List<SegmentCommitInfo>();
                for (int i = start; i < end; i++)
                {
                    mergeInfos.Add(levels[i].info);
                    Debug.Assert(infos.Contains(levels[i].info));
                }
                if (IsVerbose)
                {
                    Message(" add merge=" + m_writer.Get().SegString(mergeInfos) + " start=" + start + " end=" + end);
                }
                spec.Add(new OneMerge(mergeInfos));
            }
            else if (IsVerbose)
            {
                Message(" " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping");
            }
            // Advance the window by one full mergeFactor worth of segments.
            start = end;
            end = start + m_mergeFactor;
        }
        start = 1 + upto;
    }
    return (spec);
}
/// <summary>
/// Delegates to the wrapped policy, deliberately passing a <c>null</c>
/// trigger: the incoming <paramref name="mergeTrigger"/> is ignored.
/// </summary>
public override MergeSpecification FindMerges(MergeTrigger? mergeTrigger, SegmentInfos segmentInfos)
{
    return @base.FindMerges(null, segmentInfos);
}
/// <summary>
/// Tiered merge selection: sorts segments by byte size (descending), computes
/// the budget of segments the index is allowed to have, and while the index is
/// over budget repeatedly scores candidate merge windows and picks the
/// best-scoring one. Returns <c>null</c> when no merge is needed.
/// </summary>
public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos infos)
{
    if (Verbose())
    {
        Message("findMerges: " + infos.Count + " segments");
    }
    if (infos.Count == 0)
    {
        return (null);
    }
    ICollection<SegmentCommitInfo> merging = m_writer.Get().MergingSegments;
    ICollection<SegmentCommitInfo> toBeMerged = new HashSet<SegmentCommitInfo>();
    List<SegmentCommitInfo> infosSorted = new List<SegmentCommitInfo>(infos.AsList());
    infosSorted.Sort(new SegmentByteSizeDescending(this));
    // Compute total index bytes & print details about the index
    long totIndexBytes = 0;
    long minSegmentBytes = long.MaxValue;
    foreach (SegmentCommitInfo info in infosSorted)
    {
        long segBytes = Size(info);
        if (Verbose())
        {
            string extra = merging.Contains(info) ? " [merging]" : "";
            if (segBytes >= maxMergedSegmentBytes / 2.0)
            {
                extra += " [skip: too large]";
            }
            else if (segBytes < floorSegmentBytes)
            {
                extra += " [floored]";
            }
            Message(" seg=" + m_writer.Get().SegString(info) + " size=" + string.Format("{0:0.000}", segBytes / 1024 / 1024.0) + " MB" + extra);
        }
        minSegmentBytes = Math.Min(segBytes, minSegmentBytes);
        // Accum total byte size
        totIndexBytes += segBytes;
    }
    // If we have too-large segments, grace them out
    // of the maxSegmentCount:
    int tooBigCount = 0;
    while (tooBigCount < infosSorted.Count && Size(infosSorted[tooBigCount]) >= maxMergedSegmentBytes / 2.0)
    {
        totIndexBytes -= Size(infosSorted[tooBigCount]);
        tooBigCount++;
    }
    minSegmentBytes = FloorSize(minSegmentBytes);
    // Compute max allowed segs in the index: walk up the tiers, allowing
    // segsPerTier segments per level, each level maxMergeAtOnce times larger.
    long levelSize = minSegmentBytes;
    long bytesLeft = totIndexBytes;
    double allowedSegCount = 0;
    while (true)
    {
        double segCountLevel = bytesLeft / (double)levelSize;
        if (segCountLevel < segsPerTier)
        {
            allowedSegCount += Math.Ceiling(segCountLevel);
            break;
        }
        allowedSegCount += segsPerTier;
        bytesLeft -= (long)(segsPerTier * levelSize);
        levelSize *= maxMergeAtOnce;
    }
    int allowedSegCountInt = (int)allowedSegCount;
    MergeSpecification spec = null;
    // Cycle to possibly select more than one merge:
    while (true)
    {
        long mergingBytes = 0;
        // Gather eligible segments for merging, ie segments
        // not already being merged and not already picked (by
        // prior iteration of this loop) for merging:
        IList<SegmentCommitInfo> eligible = new List<SegmentCommitInfo>();
        for (int idx = tooBigCount; idx < infosSorted.Count; idx++)
        {
            SegmentCommitInfo info = infosSorted[idx];
            if (merging.Contains(info))
            {
                mergingBytes += info.GetSizeInBytes();
            }
            else if (!toBeMerged.Contains(info))
            {
                eligible.Add(info);
            }
        }
        bool maxMergeIsRunning = mergingBytes >= maxMergedSegmentBytes;
        if (Verbose())
        {
            Message(" allowedSegmentCount=" + allowedSegCountInt + " vs count=" + infosSorted.Count + " (eligible count=" + eligible.Count + ") tooBigCount=" + tooBigCount);
        }
        if (eligible.Count == 0)
        {
            return (spec);
        }
        if (eligible.Count >= allowedSegCountInt)
        {
            // OK we are over budget -- find best merge!
            MergeScore bestScore = null;
            IList<SegmentCommitInfo> best = null;
            bool bestTooLarge = false;
            long bestMergeBytes = 0;
            // Consider all merge starts:
            for (int startIdx = 0; startIdx <= eligible.Count - maxMergeAtOnce; startIdx++)
            {
                long totAfterMergeBytes = 0;
                IList<SegmentCommitInfo> candidate = new List<SegmentCommitInfo>();
                bool hitTooLarge = false;
                for (int idx = startIdx; idx < eligible.Count && candidate.Count < maxMergeAtOnce; idx++)
                {
                    SegmentCommitInfo info = eligible[idx];
                    long segBytes = Size(info);
                    if (totAfterMergeBytes + segBytes > maxMergedSegmentBytes)
                    {
                        hitTooLarge = true;
                        // NOTE: we continue, so that we can try
                        // "packing" smaller segments into this merge
                        // to see if we can get closer to the max
                        // size; this in general is not perfect since
                        // this is really "bin packing" and we'd have
                        // to try different permutations.
                        continue;
                    }
                    candidate.Add(info);
                    totAfterMergeBytes += segBytes;
                }
                MergeScore score = Score(candidate, hitTooLarge, mergingBytes);
                if (Verbose())
                {
                    Message(" maybe=" + m_writer.Get().SegString(candidate) + " score=" + score.Score + " " + score.Explanation + " tooLarge=" + hitTooLarge + " size=" + string.Format("{0:0.000} MB", totAfterMergeBytes / 1024.0 / 1024.0));
                }
                // If we are already running a max sized merge
                // (maxMergeIsRunning), don't allow another max
                // sized merge to kick off:
                if ((bestScore == null || score.Score < bestScore.Score) && (!hitTooLarge || !maxMergeIsRunning))
                {
                    best = candidate;
                    bestScore = score;
                    bestTooLarge = hitTooLarge;
                    bestMergeBytes = totAfterMergeBytes;
                }
            }
            if (best != null)
            {
                if (spec == null)
                {
                    spec = new MergeSpecification();
                }
                OneMerge merge = new OneMerge(best);
                spec.Add(merge);
                // Mark the chosen segments so later iterations don't pick them again.
                foreach (SegmentCommitInfo info in merge.Segments)
                {
                    toBeMerged.Add(info);
                }
                if (Verbose())
                {
                    Message(" add merge=" + m_writer.Get().SegString(merge.Segments) + " size=" + string.Format("{0:0.000} MB", bestMergeBytes / 1024.0 / 1024.0) + " score=" + string.Format("{0:0.000}", bestScore.Score) + " " + bestScore.Explanation + (bestTooLarge ? " [max merge]" : ""));
                }
            }
            else
            {
                return (spec);
            }
        }
        else
        {
            return (spec);
        }
    }
}
/// <summary>
/// Delegates to the base scheduler, but first asserts that no merge is
/// pending whenever merging is currently disallowed (<c>MayMerge</c> is false).
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    lock (this)
    {
        // Short-circuit: NextMerge (a property in this variant) is only
        // consulted when merging is disallowed — in which case we fail anyway.
        if (!MayMerge.Get() && writer.NextMerge != null)
        {
            throw new InvalidOperationException();
        }
        base.Merge(writer, trigger, newMergesFound);
    }
}
/// <summary>
/// Determine what set of merge operations are now necessary on the index.
/// <see cref="IndexWriter"/> calls this whenever there is a change to the segments.
/// This call is always synchronized on the <see cref="IndexWriter"/> instance so
/// only one thread at a time will call this method. </summary>
/// <param name="mergeTrigger"> the event that triggered the merge, or <c>null</c> if not available </param>
/// <param name="segmentInfos">
///          the total set of segments in the index </param>
/// <returns> the merges to perform, or <c>null</c> if no merge is necessary </returns>
public abstract MergeSpecification FindMerges(MergeTrigger? mergeTrigger, SegmentInfos segmentInfos);
/// <summary>
/// ConcurrentMergeScheduler entry point: pulls pending merges from the writer
/// and spawns a merge thread per merge, stalling this (producer) thread when
/// too many merge threads are already running.
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    lock (this)
    {
        //Debug.Assert(!Thread.holdsLock(writer));
        this.m_writer = writer;
        InitMergeThreadPriority();
        m_dir = writer.Directory;
        // First, quickly run through the newly proposed merges
        // and add any orthogonal merges (ie a merge not
        // involving segments already pending to be merged) to
        // the queue. If we are way behind on merging, many of
        // these newly proposed merges will likely already be
        // registered.
        if (IsVerbose)
        {
            Message("now merge");
            Message(" index: " + writer.SegString());
        }
        // Iterate, pulling from the IndexWriter's queue of
        // pending merges, until it's empty:
        while (true)
        {
            long startStallTime = 0;
            while (writer.HasPendingMerges() && MergeThreadCount >= maxMergeCount)
            {
                // this means merging has fallen too far behind: we
                // have already created maxMergeCount threads, and
                // now there's at least one more merge pending.
                // Note that only maxThreadCount of
                // those created merge threads will actually be
                // running; the rest will be paused (see
                // updateMergeThreads). We stall this producer
                // thread to prevent creation of new segments,
                // until merging has caught up:
                startStallTime = Environment.TickCount;
                if (IsVerbose)
                {
                    Message(" too many merges; stalling...");
                }
                //try
                //{
                // Released by a merge thread when it finishes (pulsing this monitor).
                Monitor.Wait(this);
                //}
                //catch (ThreadInterruptedException ie) // LUCENENET NOTE: Senseless to catch and rethrow the same exception type
                //{
                //    throw new ThreadInterruptedException(ie.ToString(), ie);
                //}
            }
            if (IsVerbose)
            {
                if (startStallTime != 0)
                {
                    Message(" stalled for " + (Environment.TickCount - startStallTime) + " msec");
                }
            }
            MergePolicy.OneMerge merge = writer.NextMerge();
            if (merge == null)
            {
                if (IsVerbose)
                {
                    Message(" no more merges pending; now return");
                }
                return;
            }
            bool success = false;
            try
            {
                if (IsVerbose)
                {
                    Message(" consider merge " + writer.SegString(merge.Segments));
                }
                // OK to spawn a new merge thread to handle this
                // merge:
                MergeThread merger = GetMergeThread(writer, merge);
                m_mergeThreads.Add(merger);
                if (IsVerbose)
                {
                    Message(" launch new thread [" + merger.Name + "]");
                }
                merger.Start();
                // Must call this after starting the thread else
                // the new thread is removed from mergeThreads
                // (since it's not alive yet):
                UpdateMergeThreads();
                success = true;
            }
            finally
            {
                if (!success)
                {
                    // Could not launch the thread: give the merge back to the writer.
                    writer.MergeFinish(merge);
                }
            }
        }
    }
}
/// <summary>No-op policy: never proposes any merges.</summary>
public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
{
    return (null);
}
/// <summary>
/// One-shot forced merge (nullable-trigger variant): when <c>DoMerge</c> is
/// set, proposes a single merge over segments [Start, Start + Length) and
/// clears the flag; otherwise returns <c>null</c>.
/// </summary>
public override MergeSpecification FindMerges(MergeTrigger? mergeTrigger, SegmentInfos segmentInfos)
{
    // NOTE: this MergeSpecification is allocated unconditionally and discarded
    // on the null path below.
    MergeSpecification ms = new MergeSpecification();
    if (DoMerge)
    {
        OneMerge om = new OneMerge(segmentInfos.AsList().SubList(Start, Start + Length));
        ms.Add(om);
        DoMerge = false;
        return ms;
    }
    return null;
}
/// <summary>
/// Delegates to the wrapped policy, ignoring the incoming
/// <paramref name="mergeTrigger"/> and passing a sentinel instead.
/// </summary>
public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
{
    // LUCENENET specific - just use min value to indicate "null" for merge trigger
    return (m_base.FindMerges((MergeTrigger)int.MinValue, segmentInfos));
}
/// <summary>
/// Checks whether the merge policy now requires any merges and, if so, asks
/// the merge scheduler to run them.
/// </summary>
private void MaybeMerge(MergeTrigger trigger, int maxNumSegments)
{
    EnsureOpen(false);
    bool foundNewMerges = UpdatePendingMerges(trigger, maxNumSegments);
    mergeScheduler.Merge(this, trigger, foundNewMerges);
}
/// <summary>
/// Asks the merge policy whether any merges are necessary now (forced merges
/// when <paramref name="maxNumSegments"/> is bounded, natural merges otherwise)
/// and registers any found merges with this writer.
/// </summary>
/// <returns><c>true</c> if any new merges were found; otherwise <c>false</c>.</returns>
private bool UpdatePendingMerges(MergeTrigger trigger, int maxNumSegments)
{
    lock (this)
    {
        Debug.Assert(maxNumSegments == -1 || maxNumSegments > 0);
        // LUCENENET: removed vacuous 'Debug.Assert(trigger != null)' — MergeTrigger
        // is a non-nullable enum, so the comparison was always true (CS0472).
        if (StopMerges)
        {
            return false;
        }
        // Do not start new merges if we've hit OOME
        if (HitOOM)
        {
            return false;
        }
        bool newMergesFound = false;
        MergePolicy.MergeSpecification spec;
        if (maxNumSegments != UNBOUNDED_MAX_MERGE_SEGMENTS)
        {
            Debug.Assert(trigger == MergeTrigger.EXPLICIT || trigger == MergeTrigger.MERGE_FINISHED,
                "Expected EXPLICT or MERGE_FINISHED as trigger even with maxNumSegments set but was: " + trigger.ToString());
            spec = mergePolicy.FindForcedMerges(segmentInfos, maxNumSegments, SegmentsToMerge);
            newMergesFound = spec != null;
            if (newMergesFound)
            {
                // Propagate the target segment count to each forced merge.
                int numMerges = spec.Merges.Count;
                for (int i = 0; i < numMerges; i++)
                {
                    MergePolicy.OneMerge merge = spec.Merges[i];
                    merge.MaxNumSegments = maxNumSegments;
                }
            }
        }
        else
        {
            spec = mergePolicy.FindMerges(trigger, segmentInfos);
        }
        newMergesFound = spec != null;
        if (newMergesFound)
        {
            int numMerges = spec.Merges.Count;
            for (int i = 0; i < numMerges; i++)
            {
                RegisterMerge(spec.Merges[i]);
            }
        }
        return newMergesFound;
    }
}
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
//ORIGINAL LINE: @Override public MergeSpecification findMerges(org.apache.lucene.index.MergeTrigger mergeTrigger, org.apache.lucene.index.SegmentInfos segmentInfos) throws java.io.IOException
/// <summary>
/// Delegates to the wrapped policy, then sorts the segments inside each
/// returned merge via <c>sortedMergeSpecification</c>.
/// </summary>
public override MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
{
    return (sortedMergeSpecification(@in.findMerges(mergeTrigger, segmentInfos)));
}
/// <summary>
/// Randomized test merge policy (nullable-trigger variant): gathers segments
/// not already being merged and, sometimes (always when more than 30 are
/// eligible, otherwise with 1-in-5 probability), shuffles them and proposes a
/// single merge over a random-length prefix. Returns <c>null</c> when no merge
/// is proposed.
/// </summary>
public override MergeSpecification FindMerges(MergeTrigger? mergeTrigger, SegmentInfos segmentInfos)
{
    MergeSpecification mergeSpec = null;
    //System.out.println("MRMP: findMerges sis=" + segmentInfos);
    int numSegments = segmentInfos.Size();
    IList<SegmentCommitInfo> segments = new List<SegmentCommitInfo>();
    ICollection<SegmentCommitInfo> merging = Writer.Get().MergingSegments;
    foreach (SegmentCommitInfo sipc in segmentInfos.Segments)
    {
        // Only segments not currently involved in a running merge are eligible.
        if (!merging.Contains(sipc))
        {
            segments.Add(sipc);
        }
    }
    numSegments = segments.Count;
    if (numSegments > 1 && (numSegments > 30 || Random.Next(5) == 3))
    {
        segments = CollectionsHelper.Shuffle(segments);
        // TODO: sometimes make more than 1 merge?
        mergeSpec = new MergeSpecification();
        int segsToMerge = TestUtil.NextInt(Random, 1, numSegments);
        mergeSpec.Add(new OneMerge(segments.SubList(0, segsToMerge)));
    }
    return mergeSpec;
}
/// <summary>No-op scheduler: intentionally performs no merging.</summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
}
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
//ORIGINAL LINE: @Override public MergeSpecification findMerges(org.apache.lucene.index.MergeTrigger mergeTrigger, org.apache.lucene.index.SegmentInfos segmentInfos) throws java.io.IOException
/// <summary>
/// Delegates to the wrapped policy, then sorts the segments inside each
/// returned merge via <c>sortedMergeSpecification</c>.
/// </summary>
public override MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos)
{
    return sortedMergeSpecification(@in.findMerges(mergeTrigger, segmentInfos));
}
/// <summary>
/// Records (on the enclosing test instance) that a merge was requested, then
/// delegates to the base scheduler.
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    outerInstance.mergeCalled = true;
    base.Merge(writer, trigger, newMergesFound);
}
/// <summary>
/// Task-based merge scheduler entry point: pulls pending merges from the
/// writer and starts a merge task per merge on the configured task scheduler,
/// stalling this (producer) thread on a reset event when too many merge tasks
/// are already running.
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    using (_lock.Write())
    {
        _writer = writer;
        _directory = writer.Directory;
        if (Verbose())
        {
            Message("now merge");
            Message(" index: " + writer.SegString());
        }
        // First, quickly run through the newly proposed merges
        // and add any orthogonal merges (ie a merge not
        // involving segments already pending to be merged) to
        // the queue. If we are way behind on merging, many of
        // these newly proposed merges will likely already be
        // registered.
        // Iterate, pulling from the IndexWriter's queue of
        // pending merges, until it's empty:
        while (true)
        {
            long startStallTime = 0;
            while (writer.HasPendingMerges() && MergeThreadCount() >= MaxMergeCount)
            {
                // this means merging has fallen too far behind: we
                // have already created maxMergeCount threads, and
                // now there's at least one more merge pending.
                // Note that only maxThreadCount of
                // those created merge threads will actually be
                // running; the rest will be paused (see
                // updateMergeThreads). We stall this producer
                // thread to prevent creation of new segments,
                // until merging has caught up:
                startStallTime = Environment.TickCount;
                if (Verbose())
                {
                    Message(" too many merges; stalling...");
                }
                // Block until a merge task signals completion.
                _manualResetEvent.Reset();
                _manualResetEvent.Wait();
            }
            if (Verbose())
            {
                if (startStallTime != 0)
                {
                    Message(" stalled for " + (Environment.TickCount - startStallTime) + " msec");
                }
            }
            // NOTE: in this variant NextMerge is a property; it still dequeues
            // the next pending merge from the writer.
            MergePolicy.OneMerge merge = writer.NextMerge;
            if (merge == null)
            {
                if (Verbose())
                {
                    Message(" no more merges pending; now return");
                }
                return;
            }
            bool success = false;
            try
            {
                if (Verbose())
                {
                    Message(" consider merge " + writer.SegString(merge.Segments));
                }
                // OK to spawn a new merge thread to handle this
                // merge:
                var merger = CreateTask(writer, merge);
                merger.MergeThreadCompleted += OnMergeThreadCompleted;
                _mergeThreads.Add(merger);
                if (Verbose())
                {
                    Message(" launch new thread [" + merger.Name + "]");
                }
                merger.Start(_taskScheduler);
                // Must call this after starting the thread else
                // the new thread is removed from mergeThreads
                // (since it's not alive yet):
                UpdateMergeThreads();
                success = true;
            }
            finally
            {
                if (!success)
                {
                    // Could not launch the task: give the merge back to the writer.
                    writer.MergeFinish(merge);
                }
            }
        }
    }
}
/// <summary>
/// Run the merges provided by <see cref="IndexWriter.NextMerge()"/>. </summary>
/// <param name="writer"> the <see cref="IndexWriter"/> to obtain the merges from. </param>
/// <param name="trigger"> the <see cref="MergeTrigger"/> that caused this merge to happen </param>
/// <param name="newMergesFound"> <c>true</c> iff any new merges were found by the caller; otherwise <c>false</c>
/// </param>
public abstract void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound);
/// <summary>
/// Runs every pending merge synchronously, serialized on <c>this</c>,
/// asserting that each segment chosen for a merge holds fewer than 20
/// documents. This variant reads <c>NextMerge</c> as a property.
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    lock (this)
    {
        while (true)
        {
            MergePolicy.OneMerge merge = writer.NextMerge;
            if (merge == null)
            {
                break; // queue drained
            }
            for (int i = 0; i < merge.Segments.Count; i++)
            {
                Debug.Assert(merge.Segments[i].Info.DocCount < 20);
            }
            writer.Merge(merge);
        }
    }
}
/// <summary>
/// Task-based merge scheduler entry point (property-based variant): pulls
/// pending merges from the writer and starts a merge task per merge on the
/// configured task scheduler, stalling this (producer) thread on a reset
/// event when too many merge tasks are already running.
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    using (_lock.Write())
    {
        _writer = writer;
        _directory = writer.Directory;
        if (Verbose)
        {
            Message("now merge");
            Message(" index: " + writer.SegString());
        }
        // First, quickly run through the newly proposed merges
        // and add any orthogonal merges (ie a merge not
        // involving segments already pending to be merged) to
        // the queue. If we are way behind on merging, many of
        // these newly proposed merges will likely already be
        // registered.
        // Iterate, pulling from the IndexWriter's queue of
        // pending merges, until it's empty:
        while (true)
        {
            long startStallTime = 0;
            while (writer.HasPendingMerges() && MergeThreadCount >= MaxMergeCount)
            {
                // this means merging has fallen too far behind: we
                // have already created maxMergeCount threads, and
                // now there's at least one more merge pending.
                // Note that only maxThreadCount of
                // those created merge threads will actually be
                // running; the rest will be paused (see
                // updateMergeThreads). We stall this producer
                // thread to prevent creation of new segments,
                // until merging has caught up:
                startStallTime = Environment.TickCount;
                if (Verbose)
                {
                    Message(" too many merges; stalling...");
                }
                // Block until a merge task signals completion.
                _manualResetEvent.Reset();
                _manualResetEvent.Wait();
            }
            if (Verbose)
            {
                if (startStallTime != 0)
                {
                    Message(" stalled for " + (Environment.TickCount - startStallTime) + " msec");
                }
            }
            MergePolicy.OneMerge merge = writer.NextMerge();
            if (merge == null)
            {
                if (Verbose)
                {
                    Message(" no more merges pending; now return");
                }
                return;
            }
            bool success = false;
            try
            {
                if (Verbose)
                {
                    Message(" consider merge " + writer.SegString(merge.Segments));
                }
                // OK to spawn a new merge thread to handle this
                // merge:
                var merger = CreateTask(writer, merge);
                merger.MergeThreadCompleted += OnMergeThreadCompleted;
                _mergeThreads.Add(merger);
                if (Verbose)
                {
                    Message(" launch new thread [" + merger.Name + "]");
                }
                merger.Start(_taskScheduler);
                // Must call this after starting the thread else
                // the new thread is removed from mergeThreads
                // (since it's not alive yet):
                UpdateMergeThreads();
                success = true;
            }
            finally
            {
                if (!success)
                {
                    // Could not launch the task: give the merge back to the writer.
                    writer.MergeFinish(merge);
                }
            }
        }
    }
}
/// <summary>
/// Run the merges provided by <see cref="IndexWriter.NextMerge()"/>. </summary>
/// <remarks>
/// Implementations typically loop, pulling merges from the writer until none
/// remain, and may run them inline or on background threads.
/// </remarks>
/// <param name="writer"> the <see cref="IndexWriter"/> to obtain the merges from. </param>
/// <param name="trigger"> the <see cref="MergeTrigger"/> that caused this merge to happen </param>
/// <param name="newMergesFound"> <c>true</c> iff any new merges were found by the caller; otherwise <c>false</c>
/// </param>
public abstract void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound);
/// <summary>
/// Runs every pending merge synchronously on the calling thread, optionally
/// logging each one when <c>VERBOSE</c> is set. This variant reads
/// <c>NextMerge</c> as a property.
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    while (true)
    {
        MergePolicy.OneMerge next = writer.NextMerge;
        if (next is null)
        {
            break;
        }
        if (VERBOSE)
        {
            Console.WriteLine("executing merge " + next.SegString(writer.Directory));
        }
        writer.Merge(next);
    }
}
/// <summary>
/// Determine what set of merge operations are now necessary on the index.
/// <see cref="IndexWriter"/> calls this whenever there is a change to the segments.
/// This call is always synchronized on the <see cref="IndexWriter"/> instance so
/// only one thread at a time will call this method. </summary>
/// <param name="mergeTrigger"> the event that triggered the merge </param>
/// <param name="segmentInfos">
///          the total set of segments in the index </param>
/// <returns> the merges to perform, or <c>null</c> if no merge is necessary </returns>
public abstract MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos);