/// <summary>Pops segments off of segmentInfos stack down to minSegment, merges them,
/// and pushes the merged index onto the top of the segmentInfos stack.
/// </summary>
private void MergeSegments(int minSegment)
{
    System.String mergedName = NewSegmentName();
    if (infoStream != null)
    {
        infoStream.Write("merging segments");
    }
    SegmentMerger merger = new SegmentMerger(directory, mergedName, useCompoundFile);

    System.Collections.ArrayList segmentsToDelete = System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(10));
    for (int i = minSegment; i < segmentInfos.Count; i++)
    {
        SegmentInfo si = segmentInfos.Info(i);
        if (infoStream != null)
        {
            infoStream.Write(" " + si.name + " (" + si.docCount + " docs)");
        }
        Monodoc.Lucene.Net.Index.IndexReader reader = new SegmentReader(si);
        merger.Add(reader);
        if ((reader.Directory() == this.directory) || (reader.Directory() == this.ramDirectory))
        {
            segmentsToDelete.Add(reader); // queue segment for deletion
        }
    }

    int mergedDocCount = merger.Merge();

    if (infoStream != null)
    {
        infoStream.WriteLine(" into " + mergedName + " (" + mergedDocCount + " docs)");
    }

    // pop old infos & add new
    segmentInfos.RemoveRange(minSegment, segmentInfos.Count - minSegment);
    segmentInfos.Add(new SegmentInfo(mergedName, mergedDocCount, directory));

    // close readers before we attempt to delete now-obsolete segments
    merger.CloseReaders();

    lock (directory)
    {
        // in- & inter-process sync
        new AnonymousClassWith2(segmentsToDelete, this, directory.MakeLock(IndexWriter.COMMIT_LOCK_NAME), COMMIT_LOCK_TIMEOUT).Run();
    }
}
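// For reference, the Lock.With body run above under the commit lock is the
// AnonymousClassWith2 class defined elsewhere in this file. A minimal sketch of what
// its DoBody presumably does, assuming the SegmentInfos.Write(Directory) method and the
// DeleteSegments helper shown below (field and constructor plumbing omitted; this is an
// inference from the surrounding code, not the verbatim implementation):
//
//     public override System.Object DoBody()
//     {
//         // commit the merged segment set before deleting the old files, so a reader
//         // opening the index concurrently always sees a consistent segments file
//         Enclosing_Instance.segmentInfos.Write(Enclosing_Instance.directory);
//         Enclosing_Instance.DeleteSegments(segmentsToDelete); // delete now-unused segments
//         return null;
//     }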
/* Some operating systems (e.g. Windows) don't permit a file to be deleted
 * while it is opened for read (e.g. by another process or thread). So we
 * assume that when a delete fails it is because the file is open in another
 * process, and queue the file for subsequent deletion. */
private void DeleteSegments(System.Collections.ArrayList segments)
{
    System.Collections.ArrayList deletable = System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(10));

    DeleteFiles(ReadDeleteableFiles(), deletable); // try to delete deleteable

    for (int i = 0; i < segments.Count; i++)
    {
        SegmentReader reader = (SegmentReader) segments[i];
        if (reader.Directory() == this.directory)
        {
            DeleteFiles(reader.Files(), deletable); // try to delete our files
        }
        else
        {
            DeleteFiles(reader.Files(), reader.Directory()); // delete other files
        }
    }

    WriteDeleteableFiles(deletable); // note files we can't delete
}
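// DeleteFiles itself is not shown in this excerpt. A minimal sketch of the retry-queue
// pattern the comment above describes, assuming the Directory.DeleteFile and
// Directory.FileExists methods of the Lucene Directory API (not the verbatim
// implementation): each delete is attempted immediately, and a file that cannot be
// deleted yet is queued on the deletable list so a later pass can retry it.
//
//     private void DeleteFiles(System.Collections.ArrayList files, System.Collections.ArrayList deletable)
//     {
//         for (int i = 0; i < files.Count; i++)
//         {
//             System.String file = (System.String) files[i];
//             try
//             {
//                 directory.DeleteFile(file); // try to delete each file
//             }
//             catch (System.IO.IOException)
//             {
//                 if (directory.FileExists(file))
//                 {
//                     deletable.Add(file); // still open somewhere; retry on a later pass
//                 }
//             }
//         }
//     }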