internal void AddFlushableState(ThreadState perThread)
{
    if (infoStream.IsEnabled("DWFC"))
    {
        infoStream.Message("DWFC", "addFlushableState " + perThread.dwpt);
    }
    DocumentsWriterPerThread dwpt = perThread.dwpt;
    //Debug.Assert(perThread.HeldByCurrentThread);
    Debug.Assert(perThread.IsInitialized);
    Debug.Assert(fullFlush);
    Debug.Assert(dwpt.deleteQueue != documentsWriter.deleteQueue);
    if (dwpt.NumDocsInRAM > 0)
    {
        lock (this)
        {
            if (!perThread.flushPending)
            {
                SetFlushPending(perThread);
            }
            DocumentsWriterPerThread flushingDWPT = InternalTryCheckOutForFlush(perThread);
            Debug.Assert(flushingDWPT != null, "DWPT must never be null here since we hold the lock and it holds documents");
            Debug.Assert(dwpt == flushingDWPT, "flushControl returned different DWPT");
            fullFlushBuffer.Add(flushingDWPT);
        }
    }
    else
    {
        perThreadPool.Reset(perThread, closed); // make this state inactive
    }
}
// Appends a new packet of buffered deletes to the stream,
// setting its generation:
public virtual long Push(FrozenBufferedUpdates packet)
{
    lock (this)
    {
        /*
         * The insert operation must be atomic. If we let threads increment the gen
         * and push the packet afterwards we risk that packets are out of order.
         * With DWPT this is possible if two or more flushes are racing to push
         * updates. If the pushed packets got out of order we would lose documents,
         * since deletes would be applied to the wrong segments.
         */
        packet.DelGen = NextGen_Renamed++;
        Debug.Assert(packet.Any());
        Debug.Assert(CheckDeleteStats());
        Debug.Assert(packet.DelGen < NextGen_Renamed);
        Debug.Assert(Updates.Count == 0 || Updates[Updates.Count - 1].DelGen < packet.DelGen, "Delete packets must be in order");
        Updates.Add(packet);
        numTerms.AddAndGet(packet.NumTermDeletes);
        bytesUsed.AddAndGet(packet.BytesUsed);
        if (InfoStream.IsEnabled("BD"))
        {
            InfoStream.Message("BD", "push deletes " + packet + " delGen=" + packet.DelGen + " packetCount=" + Updates.Count + " totBytesUsed=" + bytesUsed.Get());
        }
        Debug.Assert(CheckDeleteStats());
        return packet.DelGen;
    }
}
public override void Warm(AtomicReader reader)
{
    long startTime = Environment.TickCount;
    int indexedCount = 0;
    int docValuesCount = 0;
    int normsCount = 0;
    foreach (FieldInfo info in reader.FieldInfos)
    {
        if (info.IsIndexed)
        {
            reader.GetTerms(info.Name);
            indexedCount++;
            if (info.HasNorms)
            {
                reader.GetNormValues(info.Name);
                normsCount++;
            }
        }
        if (info.HasDocValues)
        {
            switch (info.DocValuesType)
            {
                case DocValuesType.NUMERIC:
                    reader.GetNumericDocValues(info.Name);
                    break;

                case DocValuesType.BINARY:
                    reader.GetBinaryDocValues(info.Name);
                    break;

                case DocValuesType.SORTED:
                    reader.GetSortedDocValues(info.Name);
                    break;

                case DocValuesType.SORTED_SET:
                    reader.GetSortedSetDocValues(info.Name);
                    break;

                default:
                    if (Debugging.AssertsEnabled)
                    {
                        Debugging.Assert(false); // unknown dv type
                    }
                    break;
            }
            docValuesCount++;
        }
    }
    reader.Document(0);
    reader.GetTermVectors(0);
    if (infoStream.IsEnabled("SMSW"))
    {
        infoStream.Message("SMSW", "Finished warming segment: " + reader + ", indexed=" + indexedCount + ", docValues=" + docValuesCount + ", norms=" + normsCount + ", time=" + (Environment.TickCount - startTime));
    }
}
public DocumentsWriterPerThread(string segmentName, Directory directory, LiveIndexWriterConfig indexWriterConfig, InfoStream infoStream, DocumentsWriterDeleteQueue deleteQueue, FieldInfos.Builder fieldInfos)
{
    this.directoryOrig = directory;
    this.directory = new TrackingDirectoryWrapper(directory);
    this.fieldInfos = fieldInfos;
    this.indexWriterConfig = indexWriterConfig;
    this.infoStream = infoStream;
    this.codec = indexWriterConfig.Codec;
    this.docState = new DocState(this, infoStream);
    this.docState.similarity = indexWriterConfig.Similarity;
    bytesUsed = Counter.NewCounter();
    byteBlockAllocator = new DirectTrackingAllocator(bytesUsed);
    pendingUpdates = new BufferedUpdates();
    intBlockAllocator = new Int32BlockAllocator(bytesUsed);
    this.deleteQueue = deleteQueue;
    if (Debugging.AssertsEnabled) Debugging.Assert(numDocsInRAM == 0, "num docs {0}", numDocsInRAM);
    pendingUpdates.Clear();
    deleteSlice = deleteQueue.NewSlice();

    segmentInfo = new SegmentInfo(directoryOrig, Constants.LUCENE_MAIN_VERSION, segmentName, -1, false, codec, null);
    if (Debugging.AssertsEnabled) Debugging.Assert(numDocsInRAM == 0);
    if (INFO_VERBOSE && infoStream.IsEnabled("DWPT"))
    {
        infoStream.Message("DWPT", Thread.CurrentThread.Name + " init seg=" + segmentName + " delQueue=" + deleteQueue);
    }
    // this should be the last call in the ctor
    // it really sucks that we need to pull this within the ctor and pass this ref to the chain!
    consumer = indexWriterConfig.IndexingChain.GetChain(this);
}
// LUCENENET specific Utility Method
private void WriteToInfoStream(string message)
{
    if (infoStream.IsEnabled(INFO_STREAM_COMPONENT))
    {
        infoStream.Message(INFO_STREAM_COMPONENT, message);
    }
}
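None of these gated messages appear anywhere unless an InfoStream is installed on the writer's configuration. A minimal, hedged sketch of wiring one up (this assumes the standard Lucene.NET 4.8 surface; the SetInfoStream(TextWriter) overload and FSDirectory.Open(string) should be verified against your version):

using System;
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Lucene.Net.Util;

var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, new StandardAnalyzer(LuceneVersion.LUCENE_48));
config.SetInfoStream(Console.Out); // routes "DW", "DWPT", "IFD", "FP", ... component messages to stdout
using (var writer = new IndexWriter(FSDirectory.Open("index"), config))
{
    // Writer operations now emit the diagnostics gated by IsEnabled(...) above.
}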
/// <summary>
/// Called if we hit an exception at a bad time (when
/// updating the index files) and must discard all
/// currently buffered docs. This resets our state,
/// discarding any docs added since the last flush.
/// </summary>
internal virtual void Abort(ISet<string> createdFiles)
{
    //System.out.println(Thread.currentThread().getName() + ": now abort seg=" + segmentInfo.name);
    HasAborted = Aborting = true;
    try
    {
        if (InfoStream.IsEnabled("DWPT"))
        {
            InfoStream.Message("DWPT", "now abort");
        }
        try
        {
            Consumer.Abort();
        }
        catch (Exception t)
        {
            // ignore any exception from the consumer; we are discarding this state anyway
        }
        PendingUpdates.Clear();
        CollectionsHelper.AddAll(createdFiles, Directory.CreatedFiles);
    }
    finally
    {
        Aborting = false;
        if (InfoStream.IsEnabled("DWPT"))
        {
            InfoStream.Message("DWPT", "done abort");
        }
    }
}
private bool AssertMessage(string s)
{
    if (m_infoStream.IsEnabled("FP"))
    {
        m_infoStream.Message("FP", s);
    }
    return true;
}
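AssertMessage always returns true so the call can be wrapped in Debug.Assert: the message fires only when asserts run, and the entire call, including the string concatenation, is stripped from release builds because Debug.Assert is marked [Conditional("DEBUG")]. A hedged call-site sketch (the message text is illustrative):

Debug.Assert(AssertMessage("setting largest ram consumer pending")); // no-op in release builds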
internal void AddFlushableState(ThreadState perThread)
{
    if (infoStream.IsEnabled("DWFC"))
    {
        infoStream.Message("DWFC", "addFlushableState " + perThread.dwpt);
    }
    DocumentsWriterPerThread dwpt = perThread.dwpt;
    if (Debugging.AssertsEnabled)
    {
        Debugging.Assert(perThread.IsHeldByCurrentThread);
        Debugging.Assert(perThread.IsInitialized);
        Debugging.Assert(fullFlush);
        Debugging.Assert(dwpt.deleteQueue != documentsWriter.deleteQueue);
    }
    if (dwpt.NumDocsInRAM > 0)
    {
        UninterruptableMonitor.Enter(this);
        try
        {
            if (!perThread.flushPending)
            {
                SetFlushPending(perThread);
            }
            DocumentsWriterPerThread flushingDWPT = InternalTryCheckOutForFlush(perThread);
            if (Debugging.AssertsEnabled)
            {
                Debugging.Assert(flushingDWPT != null, "DWPT must never be null here since we hold the lock and it holds documents");
                Debugging.Assert(dwpt == flushingDWPT, "flushControl returned different DWPT");
            }
            fullFlushBuffer.Add(flushingDWPT);
        }
        finally
        {
            UninterruptableMonitor.Exit(this);
        }
    }
    else
    {
        // LUCENENET specific - made method static per CA1822
        DocumentsWriterPerThreadPool.Reset(perThread, closed); // make this state inactive
    }
}
public override void Message(string component, string message)
{
    if ("TP".Equals(component, StringComparison.Ordinal))
    {
        TestPoint.Apply(message);
    }
    if (@delegate.IsEnabled(component))
    {
        @delegate.Message(component, message);
    }
}
/// <summary>
/// Perform the upgrade. </summary>
public void Upgrade()
{
    if (!DirectoryReader.IndexExists(dir))
    {
        throw new IndexNotFoundException(dir.ToString());
    }

    if (!deletePriorCommits)
    {
        ICollection<IndexCommit> commits = DirectoryReader.ListCommits(dir);
        if (commits.Count > 1)
        {
            throw new System.ArgumentException("this tool was invoked to not delete prior commit points, but the following commits were found: " + commits);
        }
    }

    IndexWriterConfig c = (IndexWriterConfig)iwc.Clone();
    c.MergePolicy = new UpgradeIndexMergePolicy(c.MergePolicy);
    c.IndexDeletionPolicy = new KeepOnlyLastCommitDeletionPolicy();

    IndexWriter w = new IndexWriter(dir, c);
    try
    {
        InfoStream infoStream = c.InfoStream;
        if (infoStream.IsEnabled("IndexUpgrader"))
        {
            infoStream.Message("IndexUpgrader", "Upgrading all pre-" + Constants.LUCENE_MAIN_VERSION + " segments of index directory '" + dir + "' to version " + Constants.LUCENE_MAIN_VERSION + "...");
        }
        w.ForceMerge(1);
        if (infoStream.IsEnabled("IndexUpgrader"))
        {
            infoStream.Message("IndexUpgrader", "All segments upgraded to version " + Constants.LUCENE_MAIN_VERSION);
        }
    }
    finally
    {
        w.Dispose();
    }
}
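A hedged usage sketch for IndexUpgrader; the two-argument constructor matches the standard Lucene.NET 4.8 surface, but verify the overloads against your version, and the index path is illustrative:

using Lucene.Net.Index;
using Lucene.Net.Store;
using Lucene.Net.Util;

using (var dir = FSDirectory.Open("/path/to/old-index"))
{
    // Rewrites every pre-4.8 segment to the current format via ForceMerge(1).
    new IndexUpgrader(dir, LuceneVersion.LUCENE_48).Upgrade();
}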
//Note: LUCENENET Specific Utility Method
private void WriteToInfoStream(params string[] messages)
{
    if (!InfoStream.IsEnabled(INFO_STREAM_COMPONENT))
    {
        return;
    }
    foreach (string message in messages)
    {
        InfoStream.Message(INFO_STREAM_COMPONENT, message);
    }
}
public override void OnDelete(DocumentsWriterFlushControl control, ThreadState state)
{
    if (FlushOnDeleteTerms())
    {
        // Flush this state by num del terms
        int maxBufferedDeleteTerms = IWConfig.MaxBufferedDeleteTerms;
        if (control.NumGlobalTermDeletes >= maxBufferedDeleteTerms)
        {
            control.SetApplyAllDeletes();
        }
    }
    if (FlushOnRAM() && control.DeleteBytesUsed > (1024 * 1024 * IWConfig.RAMBufferSizeMB))
    {
        control.SetApplyAllDeletes();
        if (InfoStream.IsEnabled("FP"))
        {
            InfoStream.Message("FP", "force apply deletes bytesUsed=" + control.DeleteBytesUsed + " vs ramBuffer=" + (1024 * 1024 * IWConfig.RAMBufferSizeMB));
        }
    }
}
/// <summary>
/// Appends a new packet of buffered deletes to the stream,
/// setting its generation:
/// </summary>
public virtual long Push(FrozenBufferedUpdates packet)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        /*
         * The insert operation must be atomic. If we let threads increment the gen
         * and push the packet afterwards we risk that packets are out of order.
         * With DWPT this is possible if two or more flushes are racing to push
         * updates. If the pushed packets got out of order we would lose documents,
         * since deletes would be applied to the wrong segments.
         */
        packet.DelGen = nextGen++;
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(packet.Any());
            Debugging.Assert(CheckDeleteStats());
            Debugging.Assert(packet.DelGen < nextGen);
            Debugging.Assert(updates.Count == 0 || updates[updates.Count - 1].DelGen < packet.DelGen, "Delete packets must be in order");
        }
        updates.Add(packet);
        numTerms.AddAndGet(packet.numTermDeletes);
        bytesUsed.AddAndGet(packet.bytesUsed);
        if (infoStream.IsEnabled("BD"))
        {
            infoStream.Message("BD", "push deletes " + packet + " delGen=" + packet.DelGen + " packetCount=" + updates.Count + " totBytesUsed=" + bytesUsed);
        }
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(CheckDeleteStats());
        }
        return packet.DelGen;
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
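The lock around the increment-and-append is what keeps generations strictly increasing; a minimal standalone sketch of just that invariant (the class and member names are illustrative, not the BufferedUpdatesStream API):

using System.Collections.Generic;

internal sealed class GenStream
{
    private long nextGen = 1;
    private readonly List<long> pushedGens = new List<long>();

    public long Push()
    {
        lock (pushedGens)
        {
            long gen = nextGen++;  // assign and advance the generation...
            pushedGens.Add(gen);   // ...and append under the same lock,
            return gen;            // so the list stays strictly increasing
        }
    }
}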
public override void OnInsert(DocumentsWriterFlushControl control, ThreadState state)
{
    if (FlushOnDocCount() && state.Dwpt.NumDocsInRAM >= IWConfig.MaxBufferedDocs)
    {
        // Flush this state by num docs
        control.FlushPending = state;
    }
    else if (FlushOnRAM()) // flush by RAM
    {
        long limit = (long)(IWConfig.RAMBufferSizeMB * 1024d * 1024d);
        long totalRam = control.ActiveBytes() + control.DeleteBytesUsed;
        if (totalRam >= limit)
        {
            if (InfoStream.IsEnabled("FP"))
            {
                InfoStream.Message("FP", "flush: activeBytes=" + control.ActiveBytes() + " deleteBytes=" + control.DeleteBytesUsed + " vs limit=" + limit);
            }
            MarkLargestWriterPending(control, state, totalRam);
        }
    }
}
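Both triggers above read their thresholds from the writer configuration; a hedged sketch of how they are typically set (the property names mirror the IWConfig getters used above, while `analyzer` and the values are illustrative):

var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)
{
    MaxBufferedDocs = 10000,        // doc-count trigger checked in OnInsert
    RAMBufferSizeMB = 64,           // RAM trigger checked in OnInsert and OnDelete
    MaxBufferedDeleteTerms = 1000,  // delete-terms trigger checked in OnDelete
};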
public override void Warm(AtomicReader reader)
{
    // LUCENENET: Use NanoTime() rather than CurrentTimeMilliseconds() for more accurate/reliable results
    long startTime = J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond;
    int indexedCount = 0;
    int docValuesCount = 0;
    int normsCount = 0;
    foreach (FieldInfo info in reader.FieldInfos)
    {
        if (info.IsIndexed)
        {
            reader.GetTerms(info.Name);
            indexedCount++;
            if (info.HasNorms)
            {
                reader.GetNormValues(info.Name);
                normsCount++;
            }
        }
        if (info.HasDocValues)
        {
            switch (info.DocValuesType)
            {
                case DocValuesType.NUMERIC:
                    reader.GetNumericDocValues(info.Name);
                    break;

                case DocValuesType.BINARY:
                    reader.GetBinaryDocValues(info.Name);
                    break;

                case DocValuesType.SORTED:
                    reader.GetSortedDocValues(info.Name);
                    break;

                case DocValuesType.SORTED_SET:
                    reader.GetSortedSetDocValues(info.Name);
                    break;

                default:
                    if (Debugging.AssertsEnabled)
                    {
                        Debugging.Assert(false); // unknown dv type
                    }
                    break;
            }
            docValuesCount++;
        }
    }
    reader.Document(0);
    reader.GetTermVectors(0);
    if (infoStream.IsEnabled("SMSW"))
    {
        infoStream.Message("SMSW", "Finished warming segment: " + reader + ", indexed=" + indexedCount + ", docValues=" + docValuesCount + ", norms=" + normsCount + ", time=" + ((J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond) - startTime));
    }
}
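The "SMSW" component tag suggests this warmer is SimpleMergedSegmentWarmer, which near-real-time setups install on the writer configuration so newly merged segments are warmed before searchers see them. A hedged sketch, assuming MergedSegmentWarmer is a settable property on the config (verify against your Lucene.NET version; `config` is an IndexWriterConfig as in the previous examples):

config.MergedSegmentWarmer = new SimpleMergedSegmentWarmer(InfoStream.Default);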
/// <summary>
/// NOTE: this method creates a compound file for all files returned by
/// info.files(). While, generally, this may include separate norms and
/// deletion files, this SegmentInfo must not reference such files when this
/// method is called, because they are not allowed within a compound file.
/// </summary>
public static ICollection<string> CreateCompoundFile(InfoStream infoStream, Directory directory, CheckAbort checkAbort, SegmentInfo info, IOContext context)
{
    string fileName = Index.IndexFileNames.SegmentFileName(info.Name, "", Lucene.Net.Index.IndexFileNames.COMPOUND_FILE_EXTENSION);
    if (infoStream.IsEnabled("IW"))
    {
        infoStream.Message("IW", "create compound file " + fileName);
    }
    Debug.Assert(Lucene3xSegmentInfoFormat.GetDocStoreOffset(info) == -1);
    // Now merge all added files
    ICollection<string> files = info.Files;
    CompoundFileDirectory cfsDir = new CompoundFileDirectory(directory, fileName, context, true);
    IOException prior = null;
    try
    {
        foreach (string file in files)
        {
            directory.Copy(cfsDir, file, file, context);
            checkAbort.Work(directory.FileLength(file));
        }
    }
    catch (System.IO.IOException ex)
    {
        prior = ex;
    }
    finally
    {
        bool success = false;
        try
        {
            IOUtils.CloseWhileHandlingException(prior, cfsDir);
            success = true;
        }
        finally
        {
            if (!success)
            {
                try
                {
                    directory.DeleteFile(fileName);
                }
                catch (Exception)
                {
                    // ignore: best-effort cleanup of the partial CFS file
                }
                try
                {
                    directory.DeleteFile(Lucene.Net.Index.IndexFileNames.SegmentFileName(info.Name, "", Lucene.Net.Index.IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION));
                }
                catch (Exception)
                {
                    // ignore: best-effort cleanup of the partial CFE file
                }
            }
        }
    }

    // Replace all previous files with the CFS/CFE files:
    HashSet<string> siFiles = new HashSet<string>();
    siFiles.Add(fileName);
    siFiles.Add(Lucene.Net.Index.IndexFileNames.SegmentFileName(info.Name, "", Lucene.Net.Index.IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION));
    info.Files = siFiles;

    return files;
}
internal void Abort(IndexWriter writer)
{
    lock (this)
    {
        //Debug.Assert(!Thread.HoldsLock(writer), "IndexWriter lock should never be held when aborting");
        bool success = false;
        HashSet<string> newFilesSet = new HashSet<string>();
        try
        {
            deleteQueue.Clear();
            if (infoStream.IsEnabled("DW"))
            {
                infoStream.Message("DW", "abort");
            }
            int limit = perThreadPool.NumThreadStatesActive;
            for (int i = 0; i < limit; i++)
            {
                ThreadState perThread = perThreadPool.GetThreadState(i);
                perThread.@Lock();
                try
                {
                    AbortThreadState(perThread, newFilesSet);
                }
                finally
                {
                    perThread.Unlock();
                }
            }
            flushControl.AbortPendingFlushes(newFilesSet);
            PutEvent(new DeleteNewFilesEvent(newFilesSet));
            flushControl.WaitForFlush();
            success = true;
        }
        finally
        {
            if (infoStream.IsEnabled("DW"))
            {
                infoStream.Message("DW", "done abort; abortedFiles=" + Arrays.ToString(newFilesSet) + " success=" + success);
            }
        }
    }
}
internal void Abort(IndexWriter writer)
{
    lock (this)
    {
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(!Monitor.IsEntered(writer), () => "IndexWriter lock should never be held when aborting");
        }
        bool success = false;
        JCG.HashSet<string> newFilesSet = new JCG.HashSet<string>();
        try
        {
            deleteQueue.Clear();
            if (infoStream.IsEnabled("DW"))
            {
                infoStream.Message("DW", "abort");
            }
            int limit = perThreadPool.NumThreadStatesActive;
            for (int i = 0; i < limit; i++)
            {
                ThreadState perThread = perThreadPool.GetThreadState(i);
                perThread.@Lock();
                try
                {
                    AbortThreadState(perThread, newFilesSet);
                }
                finally
                {
                    perThread.Unlock();
                }
            }
            flushControl.AbortPendingFlushes(newFilesSet);
            PutEvent(new DeleteNewFilesEvent(newFilesSet));
            flushControl.WaitForFlush();
            success = true;
        }
        finally
        {
            if (infoStream.IsEnabled("DW"))
            {
                infoStream.Message("DW", "done abort; abortedFiles=" + string.Format(J2N.Text.StringFormatter.InvariantCulture, "{0}", newFilesSet) + " success=" + success);
            }
        }
    }
}
/// <summary>
/// Initialize the deleter: find all previous commits in
/// the <see cref="Directory"/>, incref the files they reference, call
/// the policy to let it delete commits. This will remove
/// any files not referenced by any of the commits. </summary>
/// <exception cref="IOException"> if there is a low-level IO error </exception>
public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, InfoStream infoStream, IndexWriter writer, bool initialIndexExists)
{
    this.infoStream = infoStream;
    this.writer = writer;

    string currentSegmentsFile = segmentInfos.GetSegmentsFileName();

    if (infoStream.IsEnabled("IFD"))
    {
        infoStream.Message("IFD", "init: current segments file is \"" + currentSegmentsFile + "\"; deletionPolicy=" + policy);
    }

    this.policy = policy;
    this.directory = directory;

    // First pass: walk the files and initialize our ref counts:
    long currentGen = segmentInfos.Generation;

    CommitPoint currentCommitPoint = null;
    string[] files = null;
    try
    {
        files = directory.ListAll();
    }
#pragma warning disable 168
    catch (DirectoryNotFoundException e)
#pragma warning restore 168
    {
        // it means the directory is empty, so ignore it.
        files = new string[0];
    }

    if (currentSegmentsFile != null)
    {
        Regex r = IndexFileNames.CODEC_FILE_PATTERN;
        foreach (string fileName in files)
        {
            if (!fileName.EndsWith("write.lock", StringComparison.Ordinal) && !fileName.Equals(IndexFileNames.SEGMENTS_GEN, StringComparison.Ordinal) && (r.IsMatch(fileName) || fileName.StartsWith(IndexFileNames.SEGMENTS, StringComparison.Ordinal)))
            {
                // Add this file to refCounts with initial count 0:
                GetRefCount(fileName);

                if (fileName.StartsWith(IndexFileNames.SEGMENTS, StringComparison.Ordinal))
                {
                    // this is a commit (segments or segments_N), and
                    // it's valid (<= the max gen). Load it, then
                    // incref all files it refers to:
                    if (infoStream.IsEnabled("IFD"))
                    {
                        infoStream.Message("IFD", "init: load commit \"" + fileName + "\"");
                    }
                    SegmentInfos sis = new SegmentInfos();
                    try
                    {
                        sis.Read(directory, fileName);
                    }
#pragma warning disable 168
                    catch (FileNotFoundException e)
#pragma warning restore 168
                    {
                        // LUCENE-948: on NFS (and maybe others), if
                        // you have writers switching back and forth
                        // between machines, it's very likely that the
                        // dir listing will be stale and will claim a
                        // file segments_X exists when in fact it
                        // doesn't. So, we catch this and handle it
                        // as if the file does not exist
                        if (infoStream.IsEnabled("IFD"))
                        {
                            infoStream.Message("IFD", "init: hit FileNotFoundException when loading commit \"" + fileName + "\"; skipping this commit point");
                        }
                        sis = null;
                    }
                    // LUCENENET specific - .NET (thankfully) only has one FileNotFoundException, so we don't need this
                    //catch (NoSuchFileException)
                    //{
                    //    // LUCENE-948 (see above): we catch this and handle it
                    //    // as if the file does not exist
                    //    if (infoStream.IsEnabled("IFD"))
                    //    {
                    //        infoStream.Message("IFD", "init: hit FileNotFoundException when loading commit \"" + fileName + "\"; skipping this commit point");
                    //    }
                    //    sis = null;
                    //}
                    // LUCENENET specific - since NoSuchDirectoryException subclasses FileNotFoundException
                    // in Lucene, we need to catch it here to be on the safe side.
                    catch (System.IO.DirectoryNotFoundException)
                    {
                        // LUCENE-948: on NFS (and maybe others), if
                        // you have writers switching back and forth
                        // between machines, it's very likely that the
                        // dir listing will be stale and will claim a
                        // file segments_X exists when in fact it
                        // doesn't. So, we catch this and handle it
                        // as if the file does not exist
                        if (infoStream.IsEnabled("IFD"))
                        {
                            infoStream.Message("IFD", "init: hit FileNotFoundException when loading commit \"" + fileName + "\"; skipping this commit point");
                        }
                        sis = null;
                    }
                    catch (IOException /*e*/)
                    {
                        if (SegmentInfos.GenerationFromSegmentsFileName(fileName) <= currentGen && directory.FileLength(fileName) > 0)
                        {
                            throw; // LUCENENET: CA2200: Rethrow to preserve stack details (https://docs.microsoft.com/en-us/visualstudio/code-quality/ca2200-rethrow-to-preserve-stack-details)
                        }
                        else
                        {
                            // Most likely we are opening an index that
                            // has an aborted "future" commit, so suppress
                            // exc in this case
                            sis = null;
                        }
                    }
                    if (sis != null)
                    {
                        CommitPoint commitPoint = new CommitPoint(commitsToDelete, directory, sis);
                        if (sis.Generation == segmentInfos.Generation)
                        {
                            currentCommitPoint = commitPoint;
                        }
                        commits.Add(commitPoint);
                        IncRef(sis, true);

                        if (lastSegmentInfos == null || sis.Generation > lastSegmentInfos.Generation)
                        {
                            lastSegmentInfos = sis;
                        }
                    }
                }
            }
        }
    }

    if (currentCommitPoint == null && currentSegmentsFile != null && initialIndexExists)
    {
        // We did not in fact see the segments_N file
        // corresponding to the segmentInfos that was passed
        // in. Yet, it must exist, because our caller holds
        // the write lock. This can happen when the directory
        // listing was stale (eg when index accessed via NFS
        // client with stale directory listing cache). So we
        // try now to explicitly open this commit point:
        SegmentInfos sis = new SegmentInfos();
        try
        {
            sis.Read(directory, currentSegmentsFile);
        }
        catch (IOException e)
        {
            throw new CorruptIndexException("failed to locate current segments_N file \"" + currentSegmentsFile + "\"" + e.ToString(), e);
        }
        if (infoStream.IsEnabled("IFD"))
        {
            infoStream.Message("IFD", "forced open of current segments file " + segmentInfos.GetSegmentsFileName());
        }
        currentCommitPoint = new CommitPoint(commitsToDelete, directory, sis);
        commits.Add(currentCommitPoint);
        IncRef(sis, true);
    }

    // We keep commits list in sorted order (oldest to newest):
    CollectionUtil.TimSort(commits);

    // Now delete anything with ref count at 0. These are
    // presumably abandoned files eg due to crash of
    // IndexWriter.
    foreach (KeyValuePair<string, RefCount> entry in refCounts)
    {
        RefCount rc = entry.Value;
        string fileName = entry.Key;
        if (0 == rc.count)
        {
            if (infoStream.IsEnabled("IFD"))
            {
                infoStream.Message("IFD", "init: removing unreferenced file \"" + fileName + "\"");
            }
            DeleteFile(fileName);
        }
    }

    // Finally, give policy a chance to remove things on startup:
    this.policy.OnInit(commits);

    // Always protect the incoming segmentInfos since
    // sometime it may not be the most recent commit
    Checkpoint(segmentInfos, false);

    startingCommitDeleted = currentCommitPoint == null ? false : currentCommitPoint.IsDeleted;

    DeleteCommits();
}
private void Run(CancellationToken cancellationToken)
{
    // First time through the while loop we do the merge
    // that we were started with:
    MergePolicy.OneMerge merge = _startingMerge;

    try
    {
        if (_isLoggingEnabled)
        {
            _logger.Message(COMPONENT_NAME, " merge thread: start");
        }

        while (!cancellationToken.IsCancellationRequested)
        {
            RunningMerge = merge;
            _writer.Merge(merge);

            // Subsequent times through the loop we do any new
            // merge that writer says is necessary:
            merge = _writer.NextMerge();

            // Notify here in case any threads were stalled;
            // they will notice that the pending merge has
            // been pulled and possibly resume:
            _resetEvent.Set();

            if (merge != null)
            {
                if (_isLoggingEnabled)
                {
                    _logger.Message(COMPONENT_NAME, " merge thread: do another merge " + _writer.SegString(merge.Segments));
                }
            }
            else
            {
                break;
            }
        }

        if (_isLoggingEnabled)
        {
            _logger.Message(COMPONENT_NAME, " merge thread: done");
        }
    }
    catch (Exception exc)
    {
        // Ignore the exception if it was due to abort:
        if (!(exc is MergePolicy.MergeAbortedException))
        {
            //System.out.println(Thread.currentThread().getName() + ": CMS: exc");
            //exc.printStackTrace(System.out)
            _exceptionHandler(exc);
        }
    }
    finally
    {
        _isDone = true;
        if (MergeThreadCompleted != null)
        {
            MergeThreadCompleted(this, EventArgs.Empty);
        }
    }
}
internal bool TestPoint(string message)
{
    if (infoStream.IsEnabled("TP"))
    {
        infoStream.Message("TP", message);
    }
    return true;
}
/// <summary>
/// Initialize the deleter: find all previous commits in
/// the Directory, incref the files they reference, call
/// the policy to let it delete commits. This will remove
/// any files not referenced by any of the commits. </summary>
/// <exception cref="IOException"> if there is a low-level IO error </exception>
public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, InfoStream infoStream, IndexWriter writer, bool initialIndexExists)
{
    this.InfoStream = infoStream;
    this.Writer = writer;

    string currentSegmentsFile = segmentInfos.SegmentsFileName;

    if (infoStream.IsEnabled("IFD"))
    {
        infoStream.Message("IFD", "init: current segments file is \"" + currentSegmentsFile + "\"; deletionPolicy=" + policy);
    }

    this.Policy = policy;
    this.Directory = directory;

    // First pass: walk the files and initialize our ref counts:
    long currentGen = segmentInfos.Generation;

    CommitPoint currentCommitPoint = null;
    string[] files = null;
    try
    {
        files = directory.ListAll();
    }
    catch (NoSuchDirectoryException e)
    {
        // it means the directory is empty, so ignore it.
        files = new string[0];
    }

    if (currentSegmentsFile != null)
    {
        Regex r = IndexFileNames.CODEC_FILE_PATTERN;
        foreach (string fileName in files)
        {
            if (!fileName.EndsWith("write.lock") && !fileName.Equals(IndexFileNames.SEGMENTS_GEN) && (r.IsMatch(fileName) || fileName.StartsWith(IndexFileNames.SEGMENTS)))
            {
                // Add this file to refCounts with initial count 0:
                GetRefCount(fileName);

                if (fileName.StartsWith(IndexFileNames.SEGMENTS))
                {
                    // this is a commit (segments or segments_N), and
                    // it's valid (<= the max gen). Load it, then
                    // incref all files it refers to:
                    if (infoStream.IsEnabled("IFD"))
                    {
                        infoStream.Message("IFD", "init: load commit \"" + fileName + "\"");
                    }
                    SegmentInfos sis = new SegmentInfos();
                    try
                    {
                        sis.Read(directory, fileName);
                    }
                    catch (FileNotFoundException e)
                    {
                        // LUCENE-948: on NFS (and maybe others), if
                        // you have writers switching back and forth
                        // between machines, it's very likely that the
                        // dir listing will be stale and will claim a
                        // file segments_X exists when in fact it
                        // doesn't. So, we catch this and handle it
                        // as if the file does not exist
                        if (infoStream.IsEnabled("IFD"))
                        {
                            infoStream.Message("IFD", "init: hit FileNotFoundException when loading commit \"" + fileName + "\"; skipping this commit point");
                        }
                        sis = null;
                    }
                    catch (IOException e)
                    {
                        if (SegmentInfos.GenerationFromSegmentsFileName(fileName) <= currentGen && directory.FileLength(fileName) > 0)
                        {
                            throw e;
                        }
                        else
                        {
                            // Most likely we are opening an index that
                            // has an aborted "future" commit, so suppress
                            // exc in this case
                            sis = null;
                        }
                    }
                    if (sis != null)
                    {
                        CommitPoint commitPoint = new CommitPoint(CommitsToDelete, directory, sis);
                        if (sis.Generation == segmentInfos.Generation)
                        {
                            currentCommitPoint = commitPoint;
                        }
                        Commits.Add(commitPoint);
                        IncRef(sis, true);

                        if (LastSegmentInfos_Renamed == null || sis.Generation > LastSegmentInfos_Renamed.Generation)
                        {
                            LastSegmentInfos_Renamed = sis;
                        }
                    }
                }
            }
        }
    }

    if (currentCommitPoint == null && currentSegmentsFile != null && initialIndexExists)
    {
        // We did not in fact see the segments_N file
        // corresponding to the segmentInfos that was passed
        // in. Yet, it must exist, because our caller holds
        // the write lock. This can happen when the directory
        // listing was stale (eg when index accessed via NFS
        // client with stale directory listing cache). So we
        // try now to explicitly open this commit point:
        SegmentInfos sis = new SegmentInfos();
        try
        {
            sis.Read(directory, currentSegmentsFile);
        }
        catch (IOException e)
        {
            throw new CorruptIndexException("failed to locate current segments_N file \"" + currentSegmentsFile + "\"");
        }
        if (infoStream.IsEnabled("IFD"))
        {
            infoStream.Message("IFD", "forced open of current segments file " + segmentInfos.SegmentsFileName);
        }
        currentCommitPoint = new CommitPoint(CommitsToDelete, directory, sis);
        Commits.Add(currentCommitPoint);
        IncRef(sis, true);
    }

    // We keep commits list in sorted order (oldest to newest):
    CollectionUtil.TimSort(Commits);

    // Now delete anything with ref count at 0. These are
    // presumably abandoned files eg due to crash of
    // IndexWriter.
    foreach (KeyValuePair<string, RefCount> entry in RefCounts)
    {
        RefCount rc = entry.Value;
        string fileName = entry.Key;
        if (0 == rc.Count)
        {
            if (infoStream.IsEnabled("IFD"))
            {
                infoStream.Message("IFD", "init: removing unreferenced file \"" + fileName + "\"");
            }
            DeleteFile(fileName);
        }
    }

    // Finally, give policy a chance to remove things on startup:
    Policy.OnInit(Commits);

    // Always protect the incoming segmentInfos since
    // sometime it may not be the most recent commit
    Checkpoint(segmentInfos, false);

    StartingCommitDeleted = currentCommitPoint == null ? false : currentCommitPoint.Deleted;

    DeleteCommits();
}