/// <summary>Pops segments off of segmentInfos stack down to minSegment, merges them,
        /// and pushes the merged index onto the top of the segmentInfos stack.
        /// </summary>
        private void  MergeSegments(int minSegment)
        {
            System.String mergedName = NewSegmentName();
            if (infoStream != null)
            {
                infoStream.Write("merging segments");
            }
            SegmentMerger merger = new SegmentMerger(directory, mergedName, useCompoundFile);

            System.Collections.ArrayList segmentsToDelete = System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(10));
            for (int i = minSegment; i < segmentInfos.Count; i++)
            {
                SegmentInfo si = segmentInfos.Info(i);
                if (infoStream != null)
                {
                    infoStream.Write(" " + si.name + " (" + si.docCount + " docs)");
                }
                Monodoc.Lucene.Net.Index.IndexReader reader = new SegmentReader(si);
                merger.Add(reader);
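                // Only segments that live in this writer's own directory (or its RAM buffer)
                // are ours to delete after the merge; readers over other directories
                // (e.g. ones pulled in via AddIndexes) are left untouched.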
                if ((reader.Directory() == this.directory) || (reader.Directory() == this.ramDirectory))
                {
                    segmentsToDelete.Add(reader); // queue segment for deletion
                }
            }

            int mergedDocCount = merger.Merge();

            if (infoStream != null)
            {
                infoStream.WriteLine(" into " + mergedName + " (" + mergedDocCount + " docs)");
            }

            segmentInfos.RemoveRange(minSegment, segmentInfos.Count - minSegment); // pop old infos & add new
            segmentInfos.Add(new SegmentInfo(mergedName, mergedDocCount, directory));

            // close readers before we attempt to delete now-obsolete segments
            merger.CloseReaders();

            lock (directory)
            {
                // in- & inter-process sync: while holding the commit lock, the generated
                // Lock.With body (AnonymousClassWith2) commits the updated segmentInfos
                // and then deletes the files of the now-obsolete segments
                new AnonymousClassWith2(segmentsToDelete, this, directory.MakeLock(IndexWriter.COMMIT_LOCK_NAME), COMMIT_LOCK_TIMEOUT).Run();
            }
        }