/// <summary>Merges the provided indexes into this index.
/// <p>After this completes, the index is optimized.</p>
/// <p>The provided Monodoc.Lucene.Net.Index.IndexReaders are not closed.</p>
/// </summary>
public virtual void AddIndexes(Monodoc.Lucene.Net.Index.IndexReader[] readers)
{
    lock (this)
    {
        Optimize(); // start with zero or 1 seg

        System.String mergedName = NewSegmentName();
        SegmentMerger merger = new SegmentMerger(directory, mergedName, false);

        if (segmentInfos.Count == 1)
        {
            // add existing index, if any
            merger.Add(new SegmentReader(segmentInfos.Info(0)));
        }

        for (int i = 0; i < readers.Length; i++)
        {
            // add new indexes
            merger.Add(readers[i]);
        }

        int docCount = merger.Merge(); // merge 'em

        segmentInfos.Clear(); // pop old infos & add new
        segmentInfos.Add(new SegmentInfo(mergedName, docCount, directory));

        lock (directory)
        {
            // in- & inter-process sync
            new AnonymousClassWith1(this, directory.MakeLock("commit.lock"), COMMIT_LOCK_TIMEOUT).Run();
        }
    }
}
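// Illustrative usage sketch (not part of this class): merging two existing indexes into
// the index managed by an already-constructed IndexWriter "writer". Assumes the usual
// Monodoc.Lucene.Net.Index.IndexReader.Open(path) entry point; the paths are hypothetical.
//
//     Monodoc.Lucene.Net.Index.IndexReader[] readers = new Monodoc.Lucene.Net.Index.IndexReader[]
//     {
//         Monodoc.Lucene.Net.Index.IndexReader.Open("/tmp/indexA"),  // hypothetical path
//         Monodoc.Lucene.Net.Index.IndexReader.Open("/tmp/indexB")   // hypothetical path
//     };
//     writer.AddIndexes(readers);          // merges and optimizes; readers are left open
//     for (int i = 0; i < readers.Length; i++)
//         readers[i].Close();              // the caller is responsible for closing them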
/// <summary>Pops segments off of the segmentInfos stack down to minSegment, merges them,
/// and pushes the merged index onto the top of the segmentInfos stack.
/// </summary>
private void MergeSegments(int minSegment)
{
    System.String mergedName = NewSegmentName();
    if (infoStream != null)
    {
        infoStream.Write("merging segments");
    }
    SegmentMerger merger = new SegmentMerger(directory, mergedName, useCompoundFile);

    System.Collections.ArrayList segmentsToDelete = System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(10));
    for (int i = minSegment; i < segmentInfos.Count; i++)
    {
        SegmentInfo si = segmentInfos.Info(i);
        if (infoStream != null)
        {
            infoStream.Write(" " + si.name + " (" + si.docCount + " docs)");
        }
        Monodoc.Lucene.Net.Index.IndexReader reader = new SegmentReader(si);
        merger.Add(reader);
        if ((reader.Directory() == this.directory) || (reader.Directory() == this.ramDirectory))
        {
            segmentsToDelete.Add(reader); // queue segment for deletion
        }
    }

    int mergedDocCount = merger.Merge();

    if (infoStream != null)
    {
        infoStream.WriteLine(" into " + mergedName + " (" + mergedDocCount + " docs)");
    }

    segmentInfos.RemoveRange(minSegment, segmentInfos.Count - minSegment); // pop old infos & add new
    segmentInfos.Add(new SegmentInfo(mergedName, mergedDocCount, directory));

    // close readers before we attempt to delete now-obsolete segments
    merger.CloseReaders();

    lock (directory)
    {
        // in- & inter-process sync
        new AnonymousClassWith2(segmentsToDelete, this, directory.MakeLock(IndexWriter.COMMIT_LOCK_NAME), COMMIT_LOCK_TIMEOUT).Run();
    }
}
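// Worked example of the stack behaviour described above (segment names and doc counts
// are hypothetical): with segmentInfos = [_0 (100 docs), _1 (100 docs), _2 (10 docs), _3 (1 doc)],
// a call to MergeSegments(2) merges _2 and _3 into a freshly named segment (say _4, 11 docs),
// so segmentInfos becomes [_0 (100 docs), _1 (100 docs), _4 (11 docs)]. Readers over segments
// that live in this writer's directory or its RAMDirectory buffer are queued in segmentsToDelete,
// and their files are removed only after the merged segment has been committed under the lock.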