internal virtual FlushedSegment Flush()
{
    if (Debugging.AssertsEnabled)
    {
        Debugging.Assert(numDocsInRAM > 0);
        Debugging.Assert(deleteSlice.IsEmpty, "all deletes must be applied in prepareFlush");
    }
    segmentInfo.DocCount = numDocsInRAM;
    SegmentWriteState flushState = new SegmentWriteState(infoStream, directory, segmentInfo,
        fieldInfos.Finish(), indexWriterConfig.TermIndexInterval, pendingUpdates,
        new IOContext(new FlushInfo(numDocsInRAM, BytesUsed)));
    double startMBUsed = BytesUsed / 1024.0 / 1024.0;

    // Apply delete-by-docID now (delete-by-docID only
    // happens when an exception is hit processing that
    // doc, e.g. if the analyzer has some problem with the text):
    if (pendingUpdates.docIDs.Count > 0)
    {
        flushState.LiveDocs = codec.LiveDocsFormat.NewLiveDocs(numDocsInRAM);
        foreach (int delDocID in pendingUpdates.docIDs)
        {
            flushState.LiveDocs.Clear(delDocID);
        }
        flushState.DelCountOnFlush = pendingUpdates.docIDs.Count;
        pendingUpdates.bytesUsed.AddAndGet(-pendingUpdates.docIDs.Count * BufferedUpdates.BYTES_PER_DEL_DOCID);
        pendingUpdates.docIDs.Clear();
    }

    if (aborting)
    {
        if (infoStream.IsEnabled("DWPT"))
        {
            infoStream.Message("DWPT", "flush: skip because aborting is set");
        }
        return null;
    }

    if (infoStream.IsEnabled("DWPT"))
    {
        infoStream.Message("DWPT", "flush postings as segment " + flushState.SegmentInfo.Name + " numDocs=" + numDocsInRAM);
    }

    bool success = false;
    try
    {
        consumer.Flush(flushState);
        pendingUpdates.terms.Clear();
        segmentInfo.SetFiles(new JCG.HashSet<string>(directory.CreatedFiles));

        SegmentCommitInfo segmentInfoPerCommit = new SegmentCommitInfo(segmentInfo, 0, -1L, -1L);
        if (infoStream.IsEnabled("DWPT"))
        {
            infoStream.Message("DWPT", "new segment has " + (flushState.LiveDocs == null ? 0 : flushState.DelCountOnFlush) + " deleted docs");
            infoStream.Message("DWPT", "new segment has " +
                (flushState.FieldInfos.HasVectors ? "vectors" : "no vectors") + "; " +
                (flushState.FieldInfos.HasNorms ? "norms" : "no norms") + "; " +
                (flushState.FieldInfos.HasDocValues ? "docValues" : "no docValues") + "; " +
                (flushState.FieldInfos.HasProx ? "prox" : "no prox") + "; " +
                (flushState.FieldInfos.HasFreq ? "freqs" : "no freqs"));
            infoStream.Message("DWPT", "flushedFiles=" + string.Format(J2N.Text.StringFormatter.InvariantCulture, "{0}", segmentInfoPerCommit.GetFiles()));
            infoStream.Message("DWPT", "flushed codec=" + codec);
        }

        BufferedUpdates segmentDeletes;
        if (pendingUpdates.queries.Count == 0 && pendingUpdates.numericUpdates.Count == 0 && pendingUpdates.binaryUpdates.Count == 0)
        {
            pendingUpdates.Clear();
            segmentDeletes = null;
        }
        else
        {
            segmentDeletes = pendingUpdates;
        }

        if (infoStream.IsEnabled("DWPT"))
        {
            double newSegmentSize = segmentInfoPerCommit.GetSizeInBytes() / 1024.0 / 1024.0;
            infoStream.Message("DWPT", "flushed: segment=" + segmentInfo.Name +
                " ramUsed=" + startMBUsed.ToString(nf) + " MB" +
                " newFlushedSize(includes docstores)=" + newSegmentSize.ToString(nf) + " MB" +
                " docs/MB=" + (flushState.SegmentInfo.DocCount / newSegmentSize).ToString(nf));
        }

        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(segmentInfo != null);
        }

        FlushedSegment fs = new FlushedSegment(segmentInfoPerCommit, flushState.FieldInfos, segmentDeletes, flushState.LiveDocs, flushState.DelCountOnFlush);
        SealFlushedSegment(fs);
        success = true;
        return fs;
    }
    finally
    {
        if (!success)
        {
            Abort(filesToDelete);
        }
    }
}
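// Illustration (not part of the original class): the delete-by-docID step above
// builds a live-docs bitmap with one bit per buffered doc, then clears the bit
// for each doc that failed during indexing. A minimal self-contained sketch of
// the same pattern, using System.Collections.BitArray in place of Lucene's
// live-docs bits; the class, method, and inputs are hypothetical.
internal static class LiveDocsSketch
{
    internal static (System.Collections.BitArray liveDocs, int delCount) ApplyDocIdDeletes(
        int numDocsInRAM, System.Collections.Generic.IEnumerable<int> deletedDocIds)
    {
        // Start with every doc marked live...
        var liveDocs = new System.Collections.BitArray(numDocsInRAM, true);
        int delCount = 0;
        // ...then clear the bit for each doc deleted by docID:
        foreach (int delDocID in deletedDocIds)
        {
            if (liveDocs[delDocID])
            {
                liveDocs[delDocID] = false;
                delCount++;
            }
        }
        return (liveDocs, delCount);
    }
}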
internal virtual void SealFlushedSegment(FlushedSegment flushedSegment)
{
    if (Debugging.AssertsEnabled)
    {
        Debugging.Assert(flushedSegment != null);
    }

    SegmentCommitInfo newSegment = flushedSegment.segmentInfo;

    IndexWriter.SetDiagnostics(newSegment.Info, IndexWriter.SOURCE_FLUSH);

    IOContext context = new IOContext(new FlushInfo(newSegment.Info.DocCount, newSegment.GetSizeInBytes()));

    bool success = false;
    try
    {
        if (indexWriterConfig.UseCompoundFile)
        {
            filesToDelete.UnionWith(IndexWriter.CreateCompoundFile(infoStream, directory, CheckAbort.NONE, newSegment.Info, context));
            newSegment.Info.UseCompoundFile = true;
        }

        // Have codec write SegmentInfo. Must do this after
        // creating CFS so that 1) .si isn't slurped into CFS,
        // and 2) .si reflects useCompoundFile=true change
        // above:
        codec.SegmentInfoFormat.SegmentInfoWriter.Write(directory, newSegment.Info, flushedSegment.fieldInfos, context);

        // TODO: ideally we would freeze newSegment here!!
        // because any changes after writing the .si will be
        // lost...

        // Must write deleted docs after the CFS so we don't
        // slurp the del file into CFS:
        if (flushedSegment.liveDocs != null)
        {
            int delCount = flushedSegment.delCount;
            if (Debugging.AssertsEnabled)
            {
                Debugging.Assert(delCount > 0);
            }
            if (infoStream.IsEnabled("DWPT"))
            {
                infoStream.Message("DWPT", "flush: write " + delCount + " deletes gen=" + flushedSegment.segmentInfo.DelGen);
            }

            // TODO: we should prune the segment if it's 100%
            // deleted... but merge will also catch it.

            // TODO: in the NRT case it'd be better to hand
            // this del vector over to the
            // shortly-to-be-opened SegmentReader and let it
            // carry the changes; there's no reason to use
            // filesystem as intermediary here.

            SegmentCommitInfo info = flushedSegment.segmentInfo;
            Codec codec = info.Info.Codec;
            codec.LiveDocsFormat.WriteLiveDocs(flushedSegment.liveDocs, directory, info, delCount, context);
            newSegment.DelCount = delCount;
            newSegment.AdvanceDelGen();
        }

        success = true;
    }
    finally
    {
        if (!success)
        {
            if (infoStream.IsEnabled("DWPT"))
            {
                infoStream.Message("DWPT", "hit exception creating compound file for newly flushed segment " + newSegment.Info.Name);
            }
        }
    }
}
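// Illustration (not part of the original class): SealFlushedSegment relies on a
// "success flag + finally" idiom so failure handling runs on any exception
// without a catch block swallowing it, and on a strict write order (compound
// file first, then .si, then deletes). A minimal sketch of that idiom; the
// class and the three delegate parameters are hypothetical stand-ins for the
// codec calls above.
internal static class SealSketch
{
    internal static void SealSegment(System.Action writeCompoundFile, System.Action writeSegmentInfo, System.Action writeLiveDocs)
    {
        bool success = false;
        try
        {
            // Order matters: build the compound file first so the .si and
            // del files written afterwards are not slurped into the CFS.
            writeCompoundFile();
            writeSegmentInfo();
            writeLiveDocs();
            success = true;
        }
        finally
        {
            if (!success)
            {
                // The exception keeps propagating; this branch is only a hook
                // for logging or cleaning up partially written files.
                System.Console.Error.WriteLine("hit exception sealing flushed segment");
            }
        }
    }
}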
public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos infos)
{
    if (Verbose())
    {
        Message("findMerges: " + infos.Count + " segments");
    }
    if (infos.Count == 0)
    {
        return null;
    }
    ICollection<SegmentCommitInfo> merging = m_writer.Get().MergingSegments;
    ICollection<SegmentCommitInfo> toBeMerged = new JCG.HashSet<SegmentCommitInfo>();

    List<SegmentCommitInfo> infosSorted = new List<SegmentCommitInfo>(infos.AsList());
    infosSorted.Sort(new SegmentByteSizeDescending(this));

    // Compute total index bytes & print details about the index
    long totIndexBytes = 0;
    long minSegmentBytes = long.MaxValue;
    foreach (SegmentCommitInfo info in infosSorted)
    {
        long segBytes = Size(info);
        if (Verbose())
        {
            string extra = merging.Contains(info) ? " [merging]" : "";
            if (segBytes >= maxMergedSegmentBytes / 2.0)
            {
                extra += " [skip: too large]";
            }
            else if (segBytes < floorSegmentBytes)
            {
                extra += " [floored]";
            }
            Message("  seg=" + m_writer.Get().SegString(info) + " size=" + string.Format("{0:0.000}", segBytes / 1024 / 1024.0) + " MB" + extra);
        }

        minSegmentBytes = Math.Min(segBytes, minSegmentBytes);
        // Accum total byte size
        totIndexBytes += segBytes;
    }

    // If we have too-large segments, grace them out
    // of the maxSegmentCount:
    int tooBigCount = 0;
    while (tooBigCount < infosSorted.Count && Size(infosSorted[tooBigCount]) >= maxMergedSegmentBytes / 2.0)
    {
        totIndexBytes -= Size(infosSorted[tooBigCount]);
        tooBigCount++;
    }

    minSegmentBytes = FloorSize(minSegmentBytes);

    // Compute max allowed segs in the index
    long levelSize = minSegmentBytes;
    long bytesLeft = totIndexBytes;
    double allowedSegCount = 0;
    while (true)
    {
        double segCountLevel = bytesLeft / (double)levelSize;
        if (segCountLevel < segsPerTier)
        {
            allowedSegCount += Math.Ceiling(segCountLevel);
            break;
        }
        allowedSegCount += segsPerTier;
        bytesLeft -= (long)(segsPerTier * levelSize);
        levelSize *= maxMergeAtOnce;
    }
    int allowedSegCountInt = (int)allowedSegCount;

    MergeSpecification spec = null;

    // Cycle to possibly select more than one merge:
    while (true)
    {
        long mergingBytes = 0;

        // Gather eligible segments for merging, ie segments
        // not already being merged and not already picked (by
        // prior iteration of this loop) for merging:
        IList<SegmentCommitInfo> eligible = new List<SegmentCommitInfo>();
        for (int idx = tooBigCount; idx < infosSorted.Count; idx++)
        {
            SegmentCommitInfo info = infosSorted[idx];
            if (merging.Contains(info))
            {
                mergingBytes += info.GetSizeInBytes();
            }
            else if (!toBeMerged.Contains(info))
            {
                eligible.Add(info);
            }
        }

        bool maxMergeIsRunning = mergingBytes >= maxMergedSegmentBytes;

        if (Verbose())
        {
            Message("  allowedSegmentCount=" + allowedSegCountInt + " vs count=" + infosSorted.Count + " (eligible count=" + eligible.Count + ") tooBigCount=" + tooBigCount);
        }

        if (eligible.Count == 0)
        {
            return spec;
        }

        if (eligible.Count >= allowedSegCountInt)
        {
            // OK we are over budget -- find best merge!
            MergeScore bestScore = null;
            IList<SegmentCommitInfo> best = null;
            bool bestTooLarge = false;
            long bestMergeBytes = 0;

            // Consider all merge starts:
            for (int startIdx = 0; startIdx <= eligible.Count - maxMergeAtOnce; startIdx++)
            {
                long totAfterMergeBytes = 0;
                IList<SegmentCommitInfo> candidate = new List<SegmentCommitInfo>();
                bool hitTooLarge = false;
                for (int idx = startIdx; idx < eligible.Count && candidate.Count < maxMergeAtOnce; idx++)
                {
                    SegmentCommitInfo info = eligible[idx];
                    long segBytes = Size(info);

                    if (totAfterMergeBytes + segBytes > maxMergedSegmentBytes)
                    {
                        hitTooLarge = true;
                        // NOTE: we continue, so that we can try
                        // "packing" smaller segments into this merge
                        // to see if we can get closer to the max
                        // size; this in general is not perfect since
                        // this is really "bin packing" and we'd have
                        // to try different permutations.
                        continue;
                    }
                    candidate.Add(info);
                    totAfterMergeBytes += segBytes;
                }

                MergeScore score = Score(candidate, hitTooLarge, mergingBytes);
                if (Verbose())
                {
                    Message("  maybe=" + m_writer.Get().SegString(candidate) + " score=" + score.Score + " " + score.Explanation + " tooLarge=" + hitTooLarge + " size=" + string.Format("{0:0.000} MB", totAfterMergeBytes / 1024.0 / 1024.0));
                }

                // If we are already running a max sized merge
                // (maxMergeIsRunning), don't allow another max
                // sized merge to kick off:
                if ((bestScore == null || score.Score < bestScore.Score) && (!hitTooLarge || !maxMergeIsRunning))
                {
                    best = candidate;
                    bestScore = score;
                    bestTooLarge = hitTooLarge;
                    bestMergeBytes = totAfterMergeBytes;
                }
            }

            if (best != null)
            {
                if (spec == null)
                {
                    spec = new MergeSpecification();
                }
                OneMerge merge = new OneMerge(best);
                spec.Add(merge);
                foreach (SegmentCommitInfo info in merge.Segments)
                {
                    toBeMerged.Add(info);
                }

                if (Verbose())
                {
                    Message("  add merge=" + m_writer.Get().SegString(merge.Segments) + " size=" + string.Format("{0:0.000} MB", bestMergeBytes / 1024.0 / 1024.0) + " score=" + string.Format("{0:0.000}", bestScore.Score) + " " + bestScore.Explanation + (bestTooLarge ? " [max merge]" : ""));
                }
            }
            else
            {
                return spec;
            }
        }
        else
        {
            return spec;
        }
    }
}
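// Illustration (not part of the original class): the "max allowed segs" loop in
// FindMerges models the index as geometric tiers -- up to segsPerTier segments
// per level, each level's segment size maxMergeAtOnce times the previous. A
// standalone sketch of that computation so it can be run with sample numbers;
// the class name and the Demo inputs are hypothetical.
internal static class AllowedSegCountSketch
{
    internal static int Compute(long totIndexBytes, long minSegmentBytes, double segsPerTier, int maxMergeAtOnce)
    {
        long levelSize = minSegmentBytes;   // segment size at the current tier
        long bytesLeft = totIndexBytes;     // index bytes not yet assigned to a tier
        double allowedSegCount = 0;
        while (true)
        {
            double segCountLevel = bytesLeft / (double)levelSize;
            if (segCountLevel < segsPerTier)
            {
                // Last (partial) tier: whatever is left fits here.
                allowedSegCount += System.Math.Ceiling(segCountLevel);
                break;
            }
            // Full tier: segsPerTier segments of levelSize bytes each.
            allowedSegCount += segsPerTier;
            bytesLeft -= (long)(segsPerTier * levelSize);
            levelSize *= maxMergeAtOnce;
        }
        return (int)allowedSegCount;
    }

    internal static void Demo()
    {
        // A 1 GiB index whose smallest segment floors to 2 MiB, with the
        // defaults segsPerTier=10 and maxMergeAtOnce=10, allows 25 segments:
        // 10 at the 2 MiB tier, 10 at the 20 MiB tier, and ceil(~4.02) = 5
        // at the 200 MiB tier. Above that budget, FindMerges picks merges.
        System.Console.WriteLine(Compute(1024L * 1024 * 1024, 2L * 1024 * 1024, 10.0, 10));
    }
}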