internal DocumentsWriterFlushControl(DocumentsWriter documentsWriter, LiveIndexWriterConfig config, BufferedUpdatesStream bufferedUpdatesStream)
{
    this.infoStream = config.InfoStream;
    this.stallControl = new DocumentsWriterStallControl();
    this.perThreadPool = documentsWriter.perThreadPool;
    this.flushPolicy = documentsWriter.flushPolicy;
    this.config = config;
    this.hardMaxBytesPerDWPT = config.RAMPerThreadHardLimitMB * 1024 * 1024;
    this.documentsWriter = documentsWriter;
    this.bufferedUpdatesStream = bufferedUpdatesStream;
}
// Note: LUCENENET specific utility method
private void WriteToInfoStream(params string[] messages)
{
    if (!InfoStream.IsEnabled(INFO_STREAM_COMPONENT))
    {
        return;
    }
    foreach (string message in messages)
    {
        InfoStream.Message(INFO_STREAM_COMPONENT, message);
    }
}
/// <summary>
/// Sole constructor.
/// </summary>
public MergeThread(string name, IndexWriter writer, MergePolicy.OneMerge startMerge, InfoStream logger,
    bool isLoggingEnabled, ManualResetEventSlim resetEvent, Action<Exception> exceptionHandler)
{
    Name = name;
    _cancellationTokenSource = new CancellationTokenSource();
    _writer = writer;
    _startingMerge = startMerge;
    _logger = logger;
    _isLoggingEnabled = isLoggingEnabled;
    _resetEvent = resetEvent;
    _exceptionHandler = exceptionHandler;
}
/// <summary>
/// Called by <see cref="DocumentsWriter"/> to initialize the <see cref="FlushPolicy"/>.
/// </summary>
protected internal virtual void Init(LiveIndexWriterConfig indexWriterConfig)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        this.m_indexWriterConfig = indexWriterConfig;
        m_infoStream = indexWriterConfig.InfoStream;
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
public static void Info(object value)
{
    string txt = InfoPrefix + value.ToString();
    if (AutomaticNewline)
    {
        InfoStream.WriteLine(txt);
    }
    else
    {
        InfoStream.Write(txt);
    }
}
public override int DoLogic()
{
    IndexWriter iw = RunData.IndexWriter;
    if (iw != null)
    {
        // If infoStream was set to output to a file, close it.
        InfoStream infoStream = iw.Config.InfoStream;
        if (infoStream != null)
        {
            infoStream.Dispose();
        }
        iw.Dispose(doWait);
        RunData.IndexWriter = null;
    }
    return 1;
}
// note, just like in codec apis Directory 'dir' is NOT the same as segmentInfo.dir!!
public SegmentMerger(IList<AtomicReader> readers, SegmentInfo segmentInfo, InfoStream infoStream, Directory dir,
    int termIndexInterval, MergeState.CheckAbort checkAbort, FieldInfos.FieldNumbers fieldNumbers, IOContext context, bool validate)
{
    // validate incoming readers
    if (validate)
    {
        foreach (AtomicReader reader in readers)
        {
            reader.CheckIntegrity();
        }
    }
    MergeState = new MergeState(readers, segmentInfo, infoStream, checkAbort);
    Directory = dir;
    this.TermIndexInterval = termIndexInterval;
    this.Codec = segmentInfo.Codec;
    this.Context = context;
    this.FieldInfosBuilder = new FieldInfos.Builder(fieldNumbers);
    MergeState.SegmentInfo.DocCount = SetDocMaps();
}
public override void OnDelete(DocumentsWriterFlushControl control, ThreadState state)
{
    if (FlushOnDeleteTerms())
    {
        // Flush this state by num del terms
        int maxBufferedDeleteTerms = IWConfig.MaxBufferedDeleteTerms;
        if (control.NumGlobalTermDeletes >= maxBufferedDeleteTerms)
        {
            control.SetApplyAllDeletes();
        }
    }
    if (FlushOnRAM() && control.DeleteBytesUsed > (1024 * 1024 * IWConfig.RAMBufferSizeMB))
    {
        control.SetApplyAllDeletes();
        if (InfoStream.IsEnabled("FP"))
        {
            InfoStream.Message("FP", "force apply deletes bytesUsed=" + control.DeleteBytesUsed + " vs ramBuffer=" + (1024 * 1024 * IWConfig.RAMBufferSizeMB));
        }
    }
}
/// <summary>
/// Perform the upgrade.
/// </summary>
public void Upgrade()
{
    if (!DirectoryReader.IndexExists(dir))
    {
        throw new IndexNotFoundException(dir.ToString());
    }
    if (!deletePriorCommits)
    {
        ICollection<IndexCommit> commits = DirectoryReader.ListCommits(dir);
        if (commits.Count > 1)
        {
            throw new System.ArgumentException("this tool was invoked to not delete prior commit points, but the following commits were found: " + commits);
        }
    }
    IndexWriterConfig c = (IndexWriterConfig)iwc.Clone();
    c.MergePolicy = new UpgradeIndexMergePolicy(c.MergePolicy);
    c.IndexDeletionPolicy = new KeepOnlyLastCommitDeletionPolicy();
    IndexWriter w = new IndexWriter(dir, c);
    try
    {
        InfoStream infoStream = c.InfoStream;
        if (infoStream.IsEnabled("IndexUpgrader"))
        {
            infoStream.Message("IndexUpgrader", "Upgrading all pre-" + Constants.LUCENE_MAIN_VERSION + " segments of index directory '" + dir + "' to version " + Constants.LUCENE_MAIN_VERSION + "...");
        }
        w.ForceMerge(1);
        if (infoStream.IsEnabled("IndexUpgrader"))
        {
            infoStream.Message("IndexUpgrader", "All segments upgraded to version " + Constants.LUCENE_MAIN_VERSION);
        }
    }
    finally
    {
        w.Dispose();
    }
}
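For context, a minimal driver for this upgrader might look like the sketch below. It is illustrative only: the index path, the LuceneVersion constant, the FSDirectory.Open(string) and SetInfoStream(TextWriter) overloads, and the IndexUpgrader(Directory, IndexWriterConfig, bool) constructor are assumptions about the exact API surface, not taken from the snippet above.

// Hypothetical driver for IndexUpgrader.Upgrade(); path, version constant and overloads are assumed.
public static void UpgradeInPlace(string indexPath)
{
    using (Lucene.Net.Store.Directory dir = FSDirectory.Open(indexPath)) // assumed string overload
    {
        IndexWriterConfig iwc = new IndexWriterConfig(LuceneVersion.LUCENE_48, null);
        iwc.SetInfoStream(Console.Out); // surfaces the "IndexUpgrader" messages emitted in Upgrade()
        new IndexUpgrader(dir, iwc, false).Upgrade(); // false: keep prior commits
    }
}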
public override void OnInsert(DocumentsWriterFlushControl control, ThreadState state)
{
    if (FlushOnDocCount() && state.Dwpt.NumDocsInRAM >= IWConfig.MaxBufferedDocs)
    {
        // Flush this state by num docs
        control.FlushPending = state;
    }
    else if (FlushOnRAM()) // flush by RAM
    {
        long limit = (long)(IWConfig.RAMBufferSizeMB * 1024d * 1024d);
        long totalRam = control.ActiveBytes() + control.DeleteBytesUsed;
        if (totalRam >= limit)
        {
            if (InfoStream.IsEnabled("FP"))
            {
                InfoStream.Message("FP", "flush: activeBytes=" + control.ActiveBytes() + " deleteBytes=" + control.DeleteBytesUsed + " vs limit=" + limit);
            }
            MarkLargestWriterPending(control, state, totalRam);
        }
    }
}
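The thresholds consulted by OnDelete and OnInsert come from the writer configuration. A minimal sketch of setting them follows; the property-style setters, the analyzer variable, and the specific values are assumptions for illustration rather than part of the snippets above.

// Illustrative configuration; 'analyzer' is assumed to be an existing Analyzer, values are arbitrary.
IndexWriterConfig config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer);
config.RAMBufferSizeMB = 64.0;        // drives FlushOnRAM(): flush when buffered RAM reaches ~64 MB
config.MaxBufferedDocs = 10000;       // drives FlushOnDocCount(): flush a DWPT at 10,000 buffered docs
config.MaxBufferedDeleteTerms = 1000; // drives FlushOnDeleteTerms(): apply deletes after 1,000 buffered delete terms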
// Writes a new line to the info stream.
internal void WriteInfoNewLine()
{
    InfoStream.WriteLine();
}
/// <summary>
/// Creates a new <see cref="SimpleMergedSegmentWarmer"/>.
/// </summary>
/// <param name="infoStream"> <see cref="InfoStream"/> to log statistics about warming. </param>
public SimpleMergedSegmentWarmer(InfoStream infoStream)
{
    this.infoStream = infoStream;
}
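A warmer like this is usually handed to the writer configuration together with an InfoStream sink. The wiring below is a sketch: the config.InfoStream getter and MergedSegmentWarmer member appear in the other snippets here, but the SetInfoStream(TextWriter) overload, the settable property form, and the analyzer variable are assumptions.

// Illustrative wiring only; exact setter names are assumed.
IndexWriterConfig config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer);
config.SetInfoStream(Console.Out); // where warming statistics end up
config.MergedSegmentWarmer = new SimpleMergedSegmentWarmer(config.InfoStream);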
/// <summary>
/// Initialize the deleter: find all previous commits in
/// the Directory, incref the files they reference, call
/// the policy to let it delete commits. this will remove
/// any files not referenced by any of the commits.
/// </summary>
/// <exception cref="IOException"> if there is a low-level IO error </exception>
public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, InfoStream infoStream, IndexWriter writer, bool initialIndexExists)
{
    this.InfoStream = infoStream;
    this.Writer = writer;

    string currentSegmentsFile = segmentInfos.SegmentsFileName;
    if (infoStream.IsEnabled("IFD"))
    {
        infoStream.Message("IFD", "init: current segments file is \"" + currentSegmentsFile + "\"; deletionPolicy=" + policy);
    }

    this.Policy = policy;
    this.Directory = directory;

    // First pass: walk the files and initialize our ref counts:
    long currentGen = segmentInfos.Generation;

    CommitPoint currentCommitPoint = null;
    string[] files = null;
    try
    {
        files = directory.ListAll();
    }
    catch (NoSuchDirectoryException e)
    {
        // it means the directory is empty, so ignore it.
        files = new string[0];
    }

    if (currentSegmentsFile != null)
    {
        Regex r = IndexFileNames.CODEC_FILE_PATTERN;
        foreach (string fileName in files)
        {
            if (!fileName.EndsWith("write.lock") && !fileName.Equals(IndexFileNames.SEGMENTS_GEN)
                && (r.IsMatch(fileName) || fileName.StartsWith(IndexFileNames.SEGMENTS)))
            {
                // Add this file to refCounts with initial count 0:
                GetRefCount(fileName);

                if (fileName.StartsWith(IndexFileNames.SEGMENTS))
                {
                    // this is a commit (segments or segments_N), and
                    // it's valid (<= the max gen). Load it, then
                    // incref all files it refers to:
                    if (infoStream.IsEnabled("IFD"))
                    {
                        infoStream.Message("IFD", "init: load commit \"" + fileName + "\"");
                    }
                    SegmentInfos sis = new SegmentInfos();
                    try
                    {
                        sis.Read(directory, fileName);
                    }
                    catch (FileNotFoundException e)
                    {
                        // LUCENE-948: on NFS (and maybe others), if
                        // you have writers switching back and forth
                        // between machines, it's very likely that the
                        // dir listing will be stale and will claim a
                        // file segments_X exists when in fact it
                        // doesn't. So, we catch this and handle it
                        // as if the file does not exist
                        if (infoStream.IsEnabled("IFD"))
                        {
                            infoStream.Message("IFD", "init: hit FileNotFoundException when loading commit \"" + fileName + "\"; skipping this commit point");
                        }
                        sis = null;
                    }
                    catch (IOException e)
                    {
                        if (SegmentInfos.GenerationFromSegmentsFileName(fileName) <= currentGen && directory.FileLength(fileName) > 0)
                        {
                            throw e;
                        }
                        else
                        {
                            // Most likely we are opening an index that
                            // has an aborted "future" commit, so suppress
                            // exc in this case
                            sis = null;
                        }
                    }
                    if (sis != null)
                    {
                        CommitPoint commitPoint = new CommitPoint(CommitsToDelete, directory, sis);
                        if (sis.Generation == segmentInfos.Generation)
                        {
                            currentCommitPoint = commitPoint;
                        }
                        Commits.Add(commitPoint);
                        IncRef(sis, true);

                        if (LastSegmentInfos_Renamed == null || sis.Generation > LastSegmentInfos_Renamed.Generation)
                        {
                            LastSegmentInfos_Renamed = sis;
                        }
                    }
                }
            }
        }
    }

    if (currentCommitPoint == null && currentSegmentsFile != null && initialIndexExists)
    {
        // We did not in fact see the segments_N file
        // corresponding to the segmentInfos that was passed
        // in. Yet, it must exist, because our caller holds
        // the write lock. this can happen when the directory
        // listing was stale (eg when index accessed via NFS
        // client with stale directory listing cache). So we
        // try now to explicitly open this commit point:
        SegmentInfos sis = new SegmentInfos();
        try
        {
            sis.Read(directory, currentSegmentsFile);
        }
        catch (IOException e)
        {
            throw new CorruptIndexException("failed to locate current segments_N file \"" + currentSegmentsFile + "\"");
        }
        if (infoStream.IsEnabled("IFD"))
        {
            infoStream.Message("IFD", "forced open of current segments file " + segmentInfos.SegmentsFileName);
        }
        currentCommitPoint = new CommitPoint(CommitsToDelete, directory, sis);
        Commits.Add(currentCommitPoint);
        IncRef(sis, true);
    }

    // We keep commits list in sorted order (oldest to newest):
    CollectionUtil.TimSort(Commits);

    // Now delete anything with ref count at 0. These are
    // presumably abandoned files eg due to crash of
    // IndexWriter.
    foreach (KeyValuePair<string, RefCount> entry in RefCounts)
    {
        RefCount rc = entry.Value;
        string fileName = entry.Key;
        if (0 == rc.Count)
        {
            if (infoStream.IsEnabled("IFD"))
            {
                infoStream.Message("IFD", "init: removing unreferenced file \"" + fileName + "\"");
            }
            DeleteFile(fileName);
        }
    }

    // Finally, give policy a chance to remove things on startup:
    Policy.OnInit(Commits);

    // Always protect the incoming segmentInfos since
    // sometime it may not be the most recent commit
    Checkpoint(segmentInfos, false);

    StartingCommitDeleted = currentCommitPoint == null ? false : currentCommitPoint.Deleted;

    DeleteCommits();
}
/// <summary>
/// Sole constructor.
/// </summary>
public SegmentWriteState(InfoStream infoStream, Directory directory, SegmentInfo segmentInfo, FieldInfos fieldInfos,
    int termIndexInterval, BufferedUpdates segUpdates, IOContext context)
    : this(infoStream, directory, segmentInfo, fieldInfos, termIndexInterval, segUpdates, context, "")
{
}
/// <summary>
/// Initialize the deleter: find all previous commits in
/// the <see cref="Directory"/>, incref the files they reference, call
/// the policy to let it delete commits. this will remove
/// any files not referenced by any of the commits.
/// </summary>
/// <exception cref="IOException"> if there is a low-level IO error </exception>
public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, InfoStream infoStream, IndexWriter writer, bool initialIndexExists)
{
    this.infoStream = infoStream;
    this.writer = writer;

    string currentSegmentsFile = segmentInfos.GetSegmentsFileName();
    if (infoStream.IsEnabled("IFD"))
    {
        infoStream.Message("IFD", "init: current segments file is \"" + currentSegmentsFile + "\"; deletionPolicy=" + policy);
    }

    this.policy = policy;
    this.directory = directory;

    // First pass: walk the files and initialize our ref counts:
    long currentGen = segmentInfos.Generation;

    CommitPoint currentCommitPoint = null;
    string[] files = null;
    try
    {
        files = directory.ListAll();
    }
#pragma warning disable 168
    catch (DirectoryNotFoundException e)
#pragma warning restore 168
    {
        // it means the directory is empty, so ignore it.
        files = new string[0];
    }

    if (currentSegmentsFile != null)
    {
        Regex r = IndexFileNames.CODEC_FILE_PATTERN;
        foreach (string fileName in files)
        {
            if (!fileName.EndsWith("write.lock", StringComparison.Ordinal)
                && !fileName.Equals(IndexFileNames.SEGMENTS_GEN, StringComparison.Ordinal)
                && (r.IsMatch(fileName) || fileName.StartsWith(IndexFileNames.SEGMENTS, StringComparison.Ordinal)))
            {
                // Add this file to refCounts with initial count 0:
                GetRefCount(fileName);

                if (fileName.StartsWith(IndexFileNames.SEGMENTS, StringComparison.Ordinal))
                {
                    // this is a commit (segments or segments_N), and
                    // it's valid (<= the max gen). Load it, then
                    // incref all files it refers to:
                    if (infoStream.IsEnabled("IFD"))
                    {
                        infoStream.Message("IFD", "init: load commit \"" + fileName + "\"");
                    }
                    SegmentInfos sis = new SegmentInfos();
                    try
                    {
                        sis.Read(directory, fileName);
                    }
#pragma warning disable 168
                    catch (FileNotFoundException e)
#pragma warning restore 168
                    {
                        // LUCENE-948: on NFS (and maybe others), if
                        // you have writers switching back and forth
                        // between machines, it's very likely that the
                        // dir listing will be stale and will claim a
                        // file segments_X exists when in fact it
                        // doesn't. So, we catch this and handle it
                        // as if the file does not exist
                        if (infoStream.IsEnabled("IFD"))
                        {
                            infoStream.Message("IFD", "init: hit FileNotFoundException when loading commit \"" + fileName + "\"; skipping this commit point");
                        }
                        sis = null;
                    }
                    // LUCENENET specific - .NET (thankfully) only has one FileNotFoundException, so we don't need this
                    //catch (NoSuchFileException)
                    //{
                    //    // LUCENE-948: on NFS (and maybe others), if
                    //    // you have writers switching back and forth
                    //    // between machines, it's very likely that the
                    //    // dir listing will be stale and will claim a
                    //    // file segments_X exists when in fact it
                    //    // doesn't. So, we catch this and handle it
                    //    // as if the file does not exist
                    //    if (infoStream.IsEnabled("IFD"))
                    //    {
                    //        infoStream.Message("IFD", "init: hit FileNotFoundException when loading commit \"" + fileName + "\"; skipping this commit point");
                    //    }
                    //    sis = null;
                    //}
                    // LUCENENET specific - since NoSuchDirectoryException subclasses FileNotFoundException
                    // in Lucene, we need to catch it here to be on the safe side.
                    catch (System.IO.DirectoryNotFoundException)
                    {
                        // LUCENE-948: on NFS (and maybe others), if
                        // you have writers switching back and forth
                        // between machines, it's very likely that the
                        // dir listing will be stale and will claim a
                        // file segments_X exists when in fact it
                        // doesn't. So, we catch this and handle it
                        // as if the file does not exist
                        if (infoStream.IsEnabled("IFD"))
                        {
                            infoStream.Message("IFD", "init: hit FileNotFoundException when loading commit \"" + fileName + "\"; skipping this commit point");
                        }
                        sis = null;
                    }
                    catch (IOException /*e*/)
                    {
                        if (SegmentInfos.GenerationFromSegmentsFileName(fileName) <= currentGen && directory.FileLength(fileName) > 0)
                        {
                            throw; // LUCENENET: CA2200: Rethrow to preserve stack details (https://docs.microsoft.com/en-us/visualstudio/code-quality/ca2200-rethrow-to-preserve-stack-details)
                        }
                        else
                        {
                            // Most likely we are opening an index that
                            // has an aborted "future" commit, so suppress
                            // exc in this case
                            sis = null;
                        }
                    }
                    if (sis != null)
                    {
                        CommitPoint commitPoint = new CommitPoint(commitsToDelete, directory, sis);
                        if (sis.Generation == segmentInfos.Generation)
                        {
                            currentCommitPoint = commitPoint;
                        }
                        commits.Add(commitPoint);
                        IncRef(sis, true);

                        if (lastSegmentInfos == null || sis.Generation > lastSegmentInfos.Generation)
                        {
                            lastSegmentInfos = sis;
                        }
                    }
                }
            }
        }
    }

    if (currentCommitPoint == null && currentSegmentsFile != null && initialIndexExists)
    {
        // We did not in fact see the segments_N file
        // corresponding to the segmentInfos that was passed
        // in. Yet, it must exist, because our caller holds
        // the write lock. this can happen when the directory
        // listing was stale (eg when index accessed via NFS
        // client with stale directory listing cache). So we
        // try now to explicitly open this commit point:
        SegmentInfos sis = new SegmentInfos();
        try
        {
            sis.Read(directory, currentSegmentsFile);
        }
        catch (IOException e)
        {
            throw new CorruptIndexException("failed to locate current segments_N file \"" + currentSegmentsFile + "\"" + e.ToString(), e);
        }
        if (infoStream.IsEnabled("IFD"))
        {
            infoStream.Message("IFD", "forced open of current segments file " + segmentInfos.GetSegmentsFileName());
        }
        currentCommitPoint = new CommitPoint(commitsToDelete, directory, sis);
        commits.Add(currentCommitPoint);
        IncRef(sis, true);
    }

    // We keep commits list in sorted order (oldest to newest):
    CollectionUtil.TimSort(commits);

    // Now delete anything with ref count at 0. These are
    // presumably abandoned files eg due to crash of
    // IndexWriter.
    foreach (KeyValuePair<string, RefCount> entry in refCounts)
    {
        RefCount rc = entry.Value;
        string fileName = entry.Key;
        if (0 == rc.count)
        {
            if (infoStream.IsEnabled("IFD"))
            {
                infoStream.Message("IFD", "init: removing unreferenced file \"" + fileName + "\"");
            }
            DeleteFile(fileName);
        }
    }

    // Finally, give policy a chance to remove things on startup:
    this.policy.OnInit(commits);

    // Always protect the incoming segmentInfos since
    // sometime it may not be the most recent commit
    Checkpoint(segmentInfos, false);

    startingCommitDeleted = currentCommitPoint == null ? false : currentCommitPoint.IsDeleted;

    DeleteCommits();
}
// used by IndexWriterConfig
internal LiveIndexWriterConfig(Analyzer analyzer, Version matchVersion)
{
    this.analyzer = analyzer;
    this.MatchVersion = matchVersion;
    RamBufferSizeMB = IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB;
    maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
    maxBufferedDeleteTerms = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DELETE_TERMS;
    readerTermsIndexDivisor = IndexWriterConfig.DEFAULT_READER_TERMS_INDEX_DIVISOR;
    mergedSegmentWarmer = null;
    termIndexInterval = IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL; // TODO: this should be private to the codec, not settable here
    delPolicy = new KeepOnlyLastCommitDeletionPolicy();
    Commit = null;
    useCompoundFile = IndexWriterConfig.DEFAULT_USE_COMPOUND_FILE_SYSTEM;
    openMode = OpenMode_e.CREATE_OR_APPEND;
    similarity = IndexSearcher.DefaultSimilarity;
    mergeScheduler = new ConcurrentMergeScheduler();
    writeLockTimeout = IndexWriterConfig.WRITE_LOCK_TIMEOUT;
    indexingChain = DocumentsWriterPerThread.defaultIndexingChain;
    codec = Codec.Default;
    if (codec == null)
    {
        throw new System.NullReferenceException();
    }
    infoStream = Util.InfoStream.Default;
    mergePolicy = new TieredMergePolicy();
    flushPolicy = new FlushByRamOrCountsPolicy();
    readerPooling = IndexWriterConfig.DEFAULT_READER_POOLING;
    indexerThreadPool = new ThreadAffinityDocumentsWriterThreadPool(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES);
    PerThreadHardLimitMB = IndexWriterConfig.DEFAULT_RAM_PER_THREAD_HARD_LIMIT_MB;
}
public BufferedUpdatesStream(InfoStream infoStream)
{
    this.infoStream = infoStream;
}
// Helper for writing info lines to the info stream.
internal void WriteInfo(string infoMessage)
{
    InfoStream.WriteLine(infoMessage);
}
/// <summary>
/// Sole constructor.
/// </summary>
internal MergeState(IList<AtomicReader> readers, SegmentInfo segmentInfo, InfoStream infoStream, CheckAbort checkAbort_)
{
    this.Readers = readers;
    this.SegmentInfo = segmentInfo;
    this.InfoStream = infoStream;
    this.checkAbort = checkAbort_;
}
internal DocState(DocumentsWriterPerThread docWriter, InfoStream infoStream)
{
    this.docWriter = docWriter;
    this.infoStream = infoStream;
}
public TestPointInfoStream(InfoStream @delegate, TestPoint testPoint)
{
    this.@delegate = @delegate ?? new NullInfoStream();
    this.TestPoint = testPoint;
}
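Most of these constructors accept any InfoStream implementation, falling back to NullInfoStream when none is supplied. For reference, a minimal custom sink might look like the sketch below; the overridden member set (Message, IsEnabled, and a Dispose(bool) override) is an assumption inferred from the calls seen in the surrounding snippets, not a verified contract.

// A minimal custom InfoStream sketch; the exact base-class members are assumed.
public class ConsoleInfoStream : InfoStream
{
    // Write every component's messages to the console.
    public override void Message(string component, string message)
    {
        Console.WriteLine("{0}: {1}", component, message);
    }

    // Callers check this before formatting messages, so cheap filtering happens here.
    public override bool IsEnabled(string component)
    {
        return true;
    }

    protected override void Dispose(bool disposing)
    {
        // Nothing to release for the console.
    }
}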
/// <summary>
/// Creates a new SimpleMergedSegmentWarmer.
/// </summary>
/// <param name="infoStream"> InfoStream to log statistics about warming. </param>
public SimpleMergedSegmentWarmer(InfoStream infoStream)
{
    this.InfoStream = infoStream;
}
/// <summary>
/// Creates a new config that handles the live <see cref="IndexWriter"/>
/// settings.
/// </summary>
internal LiveIndexWriterConfig(IndexWriterConfig config)
{
    maxBufferedDeleteTerms = config.MaxBufferedDeleteTerms;
    maxBufferedDocs = config.MaxBufferedDocs;
    mergedSegmentWarmer = config.MergedSegmentWarmer;
    RamBufferSizeMB = config.RAMBufferSizeMB;
    readerTermsIndexDivisor = config.ReaderTermsIndexDivisor;
    termIndexInterval = config.TermIndexInterval;
    MatchVersion = config.MatchVersion;
    analyzer = config.Analyzer;
    delPolicy = config.DelPolicy;
    Commit = config.IndexCommit;
    openMode = config.OpenMode;
    similarity = config.Similarity;
    mergeScheduler = config.MergeScheduler;
    writeLockTimeout = config.WriteLockTimeout;
    indexingChain = config.IndexingChain;
    codec = config.Codec;
    infoStream = config.InfoStream;
    mergePolicy = config.MergePolicy;
    indexerThreadPool = config.IndexerThreadPool;
    readerPooling = config.ReaderPooling;
    flushPolicy = config.FlushPolicy;
    PerThreadHardLimitMB = config.RAMPerThreadHardLimitMB;
    useCompoundFile = config.UseCompoundFile;
    checkIntegrityAtMerge = config.CheckIntegrityAtMerge;
}
public DocumentsWriterPerThread(string segmentName, Directory directory, LiveIndexWriterConfig indexWriterConfig,
    InfoStream infoStream, DocumentsWriterDeleteQueue deleteQueue, FieldInfos.Builder fieldInfos)
{
    this.directoryOrig = directory;
    this.directory = new TrackingDirectoryWrapper(directory);
    this.fieldInfos = fieldInfos;
    this.indexWriterConfig = indexWriterConfig;
    this.infoStream = infoStream;
    this.codec = indexWriterConfig.Codec;
    this.docState = new DocState(this, infoStream);
    this.docState.similarity = indexWriterConfig.Similarity;
    bytesUsed = Counter.NewCounter();
    byteBlockAllocator = new DirectTrackingAllocator(bytesUsed);
    pendingUpdates = new BufferedUpdates();
    intBlockAllocator = new Int32BlockAllocator(bytesUsed);
    this.deleteQueue = deleteQueue;
    if (Debugging.AssertsEnabled) Debugging.Assert(numDocsInRAM == 0, "num docs {0}", numDocsInRAM);
    pendingUpdates.Clear();
    deleteSlice = deleteQueue.NewSlice();

    segmentInfo = new SegmentInfo(directoryOrig, Constants.LUCENE_MAIN_VERSION, segmentName, -1, false, codec, null);
    if (Debugging.AssertsEnabled) Debugging.Assert(numDocsInRAM == 0);
    if (INFO_VERBOSE && infoStream.IsEnabled("DWPT"))
    {
        infoStream.Message("DWPT", Thread.CurrentThread.Name + " init seg=" + segmentName + " delQueue=" + deleteQueue);
    }
    // this should be the last call in the ctor
    // it really sucks that we need to pull this within the ctor and pass this ref to the chain!
    consumer = indexWriterConfig.IndexingChain.GetChain(this);
}
/// <summary>
/// NOTE: this method creates a compound file for all files returned by
/// info.files(). While, generally, this may include separate norms and
/// deletion files, this SegmentInfo must not reference such files when this
/// method is called, because they are not allowed within a compound file.
/// </summary>
public static ICollection<string> CreateCompoundFile(InfoStream infoStream, Directory directory, CheckAbort checkAbort, SegmentInfo info, IOContext context)
{
    string fileName = Index.IndexFileNames.SegmentFileName(info.Name, "", Lucene.Net.Index.IndexFileNames.COMPOUND_FILE_EXTENSION);
    if (infoStream.IsEnabled("IW"))
    {
        infoStream.Message("IW", "create compound file " + fileName);
    }
    Debug.Assert(Lucene3xSegmentInfoFormat.GetDocStoreOffset(info) == -1);

    // Now merge all added files
    ICollection<string> files = info.Files;
    CompoundFileDirectory cfsDir = new CompoundFileDirectory(directory, fileName, context, true);
    IOException prior = null;
    try
    {
        foreach (string file in files)
        {
            directory.Copy(cfsDir, file, file, context);
            checkAbort.Work(directory.FileLength(file));
        }
    }
    catch (System.IO.IOException ex)
    {
        prior = ex;
    }
    finally
    {
        bool success = false;
        try
        {
            IOUtils.CloseWhileHandlingException(prior, cfsDir);
            success = true;
        }
        finally
        {
            if (!success)
            {
                try
                {
                    directory.DeleteFile(fileName);
                }
                catch (Exception)
                {
                }
                try
                {
                    directory.DeleteFile(Lucene.Net.Index.IndexFileNames.SegmentFileName(info.Name, "", Lucene.Net.Index.IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION));
                }
                catch (Exception)
                {
                }
            }
        }
    }

    // Replace all previous files with the CFS/CFE files:
    HashSet<string> siFiles = new HashSet<string>();
    siFiles.Add(fileName);
    siFiles.Add(Lucene.Net.Index.IndexFileNames.SegmentFileName(info.Name, "", Lucene.Net.Index.IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION));
    info.Files = siFiles;

    return files;
}
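For reference, the SegmentFileName calls above produce the compound data file and its entries table from the segment name plus the .cfs / .cfe extensions. The sketch below only illustrates that naming; the segment name "_3" is hypothetical.

// Illustration of the naming used above; "_3" is a made-up segment name.
string cfs = IndexFileNames.SegmentFileName("_3", "", IndexFileNames.COMPOUND_FILE_EXTENSION);         // "_3.cfs" (compound data)
string cfe = IndexFileNames.SegmentFileName("_3", "", IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION); // "_3.cfe" (entries table)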