//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in C#:
//ORIGINAL LINE: public void merge(IndexWriter writer, MergeTrigger trigger, boolean newMergesFound) throws java.io.IOException
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    while (true)
    {
        MergePolicy.OneMerge merge = writer.NextMerge;
        if (merge == null)
        {
            return;
        }

        bool success = false;
        try
        {
            MergeThread mergeThread = GetMergeThread(writer, merge);
            _writerTaskCounter.Increment();
            PooledConcurrentMergePool.MergeThreadsPool.Submit(MergeTask(mergeThread));
            success = true;
        }
        finally
        {
            if (!success)
            {
                // Submission failed: undo the bookkeeping so the merge can be retried
                writer.MergeFinish(merge);
                _writerTaskCounter.Decrement();
            }
        }
    }
}
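The MergeTask helper referenced above is not shown; a hypothetical sketch of what it might look like, assuming System.Threading.Tasks and that MergeThread exposes a Run entry point (both assumptions, not the original implementation):

private Task MergeTask(MergeThread mergeThread)
{
    // Wrap the merge thread's work so the pool can run it, and balance the
    // Increment() performed just before submission in the loop above.
    return new Task(() =>
    {
        try
        {
            mergeThread.Run(); // assumed entry point that performs the merge
        }
        finally
        {
            _writerTaskCounter.Decrement(); // release the slot taken at submit time
        }
    });
}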
protected override MergeThread GetMergeThread(IndexWriter writer, MergePolicy.OneMerge merge)
{
    MergeThread thread = new MyMergeThread(this, writer, merge);
    thread.SetThreadPriority((ThreadPriority)MergeThreadPriority);
    thread.IsBackground = true;
    thread.Name = "MyMergeThread";
    return thread;
}
protected override MergeThread GetMergeThread(IndexWriter writer, MergePolicy.OneMerge merge)
{
    MergeThread thread = new MyMergeThread(this, writer, merge);
    thread.ThreadPriority = MergeThreadPriority;
    thread.SetDaemon(true);
    thread.Name = "MyMergeThread";
    return thread;
}
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    MergePolicy.OneMerge merge = null;
    while ((merge = writer.NextMerge()) != null)
    {
        if (VERBOSE)
        {
            Console.WriteLine("executing merge " + merge.SegString(writer.Directory));
        }
        writer.Merge(merge);
    }
}
/// <summary>
/// Sole constructor. </summary>
public MergeThread(string name, IndexWriter writer, MergePolicy.OneMerge startMerge, InfoStream logger, bool isLoggingEnabled,
    ManualResetEventSlim resetEvent, Action<Exception> exceptionHandler, Action<MergePolicy.OneMerge> doMerge)
{
    Name = name;
    _cancellationTokenSource = new CancellationTokenSource();
    _writer = writer;
    _startingMerge = startMerge;
    _logger = logger;
    _isLoggingEnabled = isLoggingEnabled;
    _resetEvent = resetEvent;
    _exceptionHandler = exceptionHandler;
    _doMerge = doMerge;
}
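A hedged sketch, not the original source, of how the fields wired up by this constructor could cooperate at run time; the Run entry point and the InfoStream component tag are assumptions:

public void Run()
{
    try
    {
        if (!_cancellationTokenSource.IsCancellationRequested)
        {
            if (_isLoggingEnabled)
            {
                _logger.Message("MS", "merge thread " + Name + " starting"); // "MS" component tag is an assumption
            }
            _doMerge(_startingMerge); // delegate injected by the scheduler does the actual work
        }
    }
    catch (Exception e)
    {
        _exceptionHandler(e); // let the owning scheduler decide how to surface merge failures
    }
    finally
    {
        _resetEvent.Set(); // wake any waiter blocked on this merge thread's completion
    }
}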
/// <summary>Just do the merges in sequence. We do this
/// "synchronized" so that even if the application is using
/// multiple threads, only one merge may run at a time.
/// </summary>
public override void Merge(IndexWriter writer)
{
    lock (this)
    {
        while (true)
        {
            MergePolicy.OneMerge merge = writer.GetNextMerge();
            if (merge == null)
            {
                break;
            }
            writer.Merge(merge);
        }
    }
}
/// <summary>
/// Just do the merges in sequence. We do this
/// "synchronized" so that even if the application is using
/// multiple threads, only one merge may run at a time.
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    lock (this)
    {
        while (true)
        {
            MergePolicy.OneMerge merge = writer.NextMerge;
            if (merge == null)
            {
                break;
            }
            writer.Merge(merge);
        }
    }
}
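For reference, installing a serial scheduler like the ones above is a one-liner on the writer's config; a minimal usage sketch, assuming the Lucene.NET 4.8-style API (the directory, analyzer, and version choices here are illustrative):

using Lucene.Net.Analysis.Standard;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Lucene.Net.Util;

var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);
var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)
    .SetMergeScheduler(new SerialMergeScheduler()); // merges now run inline, one at a time
using (var writer = new IndexWriter(new RAMDirectory(), config))
{
    // AddDocument / Commit as usual; any merge triggered here executes on the calling thread
}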
public override void Merge(IndexWriter writer, IState state)
{
    using (var mergeStats = _commitStats?.For(IndexingOperation.Lucene.Merge))
    {
        var sp = Stopwatch.StartNew();

        lock (this)
        {
            var totalMergesCount = writer.PendingMergesCount;
            mergeStats?.RecordPendingMergesCount(totalMergesCount);

            var executedMerges = 0;

            while (true)
            {
                if (sp.Elapsed > _maxMergeTime)
                {
                    if (writer.PendingMergesCount > 0)
                    {
                        _index.ScheduleIndexingRun(); // we stop before we are done merging, force a new batch
                    }
                    break;
                }

                MergePolicy.OneMerge merge = writer.GetNextMerge();
                if (merge == null)
                {
                    break;
                }

                executedMerges++;
                mergeStats?.RecordMergeStats(merge.Stats);

                writer.Merge(merge, state);

                var diff = writer.PendingMergesCount - totalMergesCount + executedMerges;
                if (diff > 0)
                {
                    // more merges can be created after a successful merge
                    mergeStats?.RecordPendingMergesCount(diff);
                    totalMergesCount += diff;
                }
            }
        }
    }
}
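The diff bookkeeping above deserves a worked example (the numbers are made up): if totalMergesCount starts at 3 and, after executing 2 merges, PendingMergesCount reports 4, then diff = 4 - 3 + 2 = 3, i.e. three new merges were registered while we worked. Those 3 are recorded in the stats and totalMergesCount is bumped to 6, so on the next iteration diff starts from zero again unless yet more merges appear.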
internal int docShift; // total # deleted docs that were compacted by this merge

public MergeDocIDRemapper(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergedDocCount)
{
    this.docMaps = docMaps;
    SegmentInfo firstSegment = merge.segments.Info(0);
    int i = 0;
    while (true)
    {
        SegmentInfo info = infos.Info(i);
        if (info.Equals(firstSegment))
        {
            break;
        }
        minDocID += info.docCount;
        i++;
    }

    int numDocs = 0;
    for (int j = 0; j < docMaps.Length; i++, j++)
    {
        numDocs += infos.Info(i).docCount;
        System.Diagnostics.Debug.Assert(infos.Info(i).Equals(merge.segments.Info(j)));
    }
    maxDocID = minDocID + numDocs;

    starts = new int[docMaps.Length];
    newStarts = new int[docMaps.Length];
    starts[0] = minDocID;
    newStarts[0] = minDocID;
    for (i = 1; i < docMaps.Length; i++)
    {
        int lastDocCount = merge.segments.Info(i - 1).docCount;
        starts[i] = starts[i - 1] + lastDocCount;
        newStarts[i] = newStarts[i - 1] + lastDocCount - delCounts[i - 1];
    }
    docShift = numDocs - mergedDocCount;

    // There are rare cases when docShift is 0. It happens
    // if you try to delete a docID that's out of bounds,
    // because the SegmentReader still allocates deletedDocs
    // and pretends it has deletions ... so we can't make
    // this assert here
    // assert docShift > 0;

    // Make sure it all adds up:
    System.Diagnostics.Debug.Assert(docShift == maxDocID - (newStarts[docMaps.Length - 1] + merge.segments.Info(docMaps.Length - 1).docCount - delCounts[docMaps.Length - 1]));
}
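A concrete trace with made-up numbers: suppose the merge covers two segments of 10 docs each (numDocs = 20) with delCounts = {2, 1}, preceded by 100 docs in earlier segments (minDocID = 100), and the merged segment holds mergedDocCount = 17 docs. Then starts = {100, 110}, newStarts = {100, 108}, maxDocID = 120, and docShift = 20 - 17 = 3, which matches the final assert: 120 - (108 + 10 - 1) = 3.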
protected override void DoMerge(MergePolicy.OneMerge merge)
{
    OuterInstance.MergeCalled = true;
    base.DoMerge(merge);
}
public MyMergeThread(TestMergeSchedulerExternal.MyMergeScheduler outerInstance, IndexWriter writer, MergePolicy.OneMerge merge)
    : base(outerInstance, writer, merge)
{
    this.OuterInstance = outerInstance;
    outerInstance.OuterInstance.MergeThreadCreated = true;
}
internal BlockingMerge(PooledConcurrentMergeSchedulerTest.TestPooledConcurrentMergeScheduler outerInstance, IndexWriter writer, MergePolicy.OneMerge merge, System.Threading.CountdownEvent executionLatch)
    : base(writer, merge)
{
    this._outerInstance = outerInstance;
    this.ExecutionLatch = executionLatch;
}
protected internal override MergeThread GetMergeThread(IndexWriter writer, MergePolicy.OneMerge merge)
{
    lock (this)
    {
        return new BlockingMerge(this, writer, merge, ExecutionLatchConflict);
    }
}
// Remaps all buffered deletes based on a completed
// merge
internal virtual void Remap(MergeDocIDRemapper mapper, SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount)
{
    lock (this)
    {
        System.Collections.IDictionary newDeleteTerms;

        // Remap delete-by-term
        if (terms.Count > 0)
        {
            if (doTermSort)
            {
                newDeleteTerms = new System.Collections.Generic.SortedDictionary<object, object>();
            }
            else
            {
                newDeleteTerms = new System.Collections.Hashtable();
            }
            System.Collections.IEnumerator iter = new System.Collections.Hashtable(terms).GetEnumerator();
            while (iter.MoveNext())
            {
                System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry)iter.Current;
                Num num = (Num)entry.Value;
                newDeleteTerms[entry.Key] = new Num(mapper.Remap(num.GetNum()));
            }
        }
        else
        {
            newDeleteTerms = null;
        }

        // Remap delete-by-docID
        System.Collections.ArrayList newDeleteDocIDs;
        if (docIDs.Count > 0)
        {
            newDeleteDocIDs = new System.Collections.ArrayList(docIDs.Count);
            System.Collections.IEnumerator iter = docIDs.GetEnumerator();
            while (iter.MoveNext())
            {
                System.Int32 num = (System.Int32)iter.Current;
                newDeleteDocIDs.Add((System.Int32)mapper.Remap(num));
            }
        }
        else
        {
            newDeleteDocIDs = null;
        }

        // Remap delete-by-query
        System.Collections.Hashtable newDeleteQueries;
        if (queries.Count > 0)
        {
            newDeleteQueries = new System.Collections.Hashtable(queries.Count);
            System.Collections.IEnumerator iter = new System.Collections.Hashtable(queries).GetEnumerator();
            while (iter.MoveNext())
            {
                System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry)iter.Current;
                System.Int32 num = (System.Int32)entry.Value;
                newDeleteQueries[entry.Key] = (System.Int32)mapper.Remap(num);
            }
        }
        else
        {
            newDeleteQueries = null;
        }

        if (newDeleteTerms != null)
        {
            terms = newDeleteTerms;
        }
        if (newDeleteDocIDs != null)
        {
            docIDs = newDeleteDocIDs;
        }
        if (newDeleteQueries != null)
        {
            queries = newDeleteQueries;
        }
    }
}
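For comparison, the delete-by-docID pass above collapses to a single projection with modern generic collections; a sketch only, assuming docIDs were a List<int> instead of an ArrayList:

using System.Collections.Generic;
using System.Linq;

static List<int> RemapDocIDs(List<int> docIDs, MergeDocIDRemapper mapper)
{
    // Point every buffered delete-by-docID at its new position after the
    // merge compacted away previously deleted documents.
    return docIDs.Select(docID => mapper.Remap(docID)).ToList();
}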