/// <summary>
/// Marks <paramref name="docID"/> as deleted in the writable live-docs bits.
/// Returns <c>true</c> if the document was live before this call (i.e. this
/// call performed the deletion), <c>false</c> if it was already deleted.
/// Caller must hold the writer lock; live docs must be private (not shared).
/// </summary>
public virtual bool Delete(int docID)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(liveDocs != null);
            Debugging.Assert(UninterruptableMonitor.IsEntered(writer));
            Debugging.Assert(docID >= 0 && docID < liveDocs.Length, "out of bounds: docid={0} liveDocsLength={1} seg={2} docCount={3}", docID, liveDocs.Length, Info.Info.Name, Info.Info.DocCount);
            Debugging.Assert(!liveDocsShared);
        }
        bool wasLive = liveDocs.Get(docID);
        if (wasLive)
        {
            // Flip the bit and track the deletion so it can later be
            // persisted by WriteLiveDocs().
            ((IMutableBits)liveDocs).Clear(docID);
            pendingDeleteCount++;
        }
        return wasLive;
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
/// <summary>
/// Ensures <c>liveDocs</c> is a private, writable instance. If the current
/// bits are shared (a cloned <c>SegmentReader</c> references them), performs
/// a copy-on-write by obtaining a fresh live-docs instance from the codec's
/// <c>LiveDocsFormat</c> before any mutation. Caller must hold the writer lock.
/// </summary>
public virtual void InitWritableLiveDocs()
{
    UninterruptableMonitor.Enter(this);
    try
    {
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(UninterruptableMonitor.IsEntered(writer));
            Debugging.Assert(Info.Info.DocCount > 0);
        }
        if (!liveDocsShared)
        {
            return; // already private and writable; nothing to do
        }
        // Copy on write: a SegmentReader is sharing the current liveDocs
        // instance, so make a private clone before we change it.
        LiveDocsFormat format = Info.Info.Codec.LiveDocsFormat;
        liveDocs = liveDocs == null
            ? format.NewLiveDocs(Info.Info.DocCount)
            : format.NewLiveDocs(liveDocs);
        liveDocsShared = false;
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
/// <summary>
/// Aborts all in-flight indexing: clears the delete queue, aborts every
/// active <c>ThreadState</c> (locking each one individually), aborts pending
/// flushes, and waits for any running flush to finish. Files created by the
/// aborted work are collected into <c>newFilesSet</c> and scheduled for
/// deletion via a <c>DeleteNewFilesEvent</c>.
/// <para/>
/// Must NOT be called while holding the <see cref="IndexWriter"/> lock
/// (enforced by the assertion below).
/// </summary>
internal void Abort(IndexWriter writer)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        if (Debugging.AssertsEnabled)
        {
            // LUCENENET: fixed grammar in assertion message ("hold" -> "held")
            Debugging.Assert(!UninterruptableMonitor.IsEntered(writer), "IndexWriter lock should never be held when aborting");
        }
        bool success = false;
        JCG.HashSet<string> newFilesSet = new JCG.HashSet<string>();
        try
        {
            deleteQueue.Clear();
            if (infoStream.IsEnabled("DW"))
            {
                infoStream.Message("DW", "abort");
            }
            int limit = perThreadPool.NumThreadStatesActive;
            for (int i = 0; i < limit; i++)
            {
                ThreadState perThread = perThreadPool.GetThreadState(i);
                // Each thread state must be locked while it is aborted so no
                // indexing thread can use it concurrently.
                perThread.@Lock();
                try
                {
                    AbortThreadState(perThread, newFilesSet);
                }
                finally
                {
                    perThread.Unlock();
                }
            }
            flushControl.AbortPendingFlushes(newFilesSet);
            PutEvent(new DeleteNewFilesEvent(newFilesSet));
            flushControl.WaitForFlush();
            success = true;
        }
        finally
        {
            // Always log the outcome, even if an abort step threw.
            if (infoStream.IsEnabled("DW"))
            {
                infoStream.Message("DW", "done abort; abortedFiles=" + string.Format(J2N.Text.StringFormatter.InvariantCulture, "{0}", newFilesSet) + " success=" + success);
            }
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
/// <summary>
/// For definition of "check point" see <see cref="IndexWriter"/> comments:
/// "Clarification: Check Points (and commits)".
/// <para/>
/// Writer calls this when it has made a "consistent
/// change" to the index, meaning new files are written to
/// the index and the in-memory <see cref="SegmentInfos"/> have been
/// modified to point to those files.
/// <para/>
/// This may or may not be a commit (segments_N may or may
/// not have been written).
/// <para/>
/// We simply incref the files referenced by the new
/// <see cref="SegmentInfos"/> and decref the files we had previously
/// seen (if any).
/// <para/>
/// If this is a commit, we also call the policy to give it
/// a chance to remove other commits. If any commits are
/// removed, we decref their files as well.
/// </summary>
public void Checkpoint(SegmentInfos segmentInfos, bool isCommit)
{
    if (Debugging.AssertsEnabled)
    {
        Debugging.Assert(IsLocked);
        Debugging.Assert(UninterruptableMonitor.IsEntered(writer));
    }
    long t0 = 0;
    if (infoStream.IsEnabled("IFD"))
    {
        t0 = J2N.Time.NanoTime();
        infoStream.Message("IFD", "now checkpoint \"" + writer.SegString(writer.ToLiveInfos(segmentInfos).Segments) + "\" [" + segmentInfos.Count + " segments " + "; isCommit = " + isCommit + "]");
    }
    // Try again now to delete any previously un-deletable
    // files (because they were in use, on Windows):
    DeletePendingFiles();

    // Incref the new files BEFORE any decref below, so files shared
    // between the old and new checkpoint are never transiently at refcount 0:
    IncRef(segmentInfos, isCommit);

    if (isCommit)
    {
        // Append to our commits list:
        commits.Add(new CommitPoint(commitsToDelete, directory, segmentInfos));

        // Tell policy so it can remove commits:
        policy.OnCommit(commits);

        // Decref files for commits that were deleted by the policy:
        DeleteCommits();
    }
    else
    {
        // DecRef old files from the last checkpoint, if any:
        DecRef(lastFiles);
        lastFiles.Clear();

        // Save files so we can decr on next checkpoint/commit:
        lastFiles.AddRange(segmentInfos.GetFiles(directory, false));
    }
    if (infoStream.IsEnabled("IFD"))
    {
        long t1 = J2N.Time.NanoTime();
        infoStream.Message("IFD", ((t1 - t0) / 1000000) + " msec to checkpoint");
    }
}
/// <summary>
/// Unconditionally acquires the purge lock (blocking if necessary) and runs
/// <c>InnerPurge</c>, returning its result. Must be called without holding
/// this instance's monitor or the writer's monitor (asserted below).
/// </summary>
internal virtual int ForcePurge(IndexWriter writer)
{
    if (Debugging.AssertsEnabled)
    {
        Debugging.Assert(!UninterruptableMonitor.IsEntered(this));
        Debugging.Assert(!UninterruptableMonitor.IsEntered(writer));
    }
    purgeLock.@Lock();
    try
    {
        return InnerPurge(writer);
    }
    finally
    {
        purgeLock.Unlock();
    }
}
/// <summary>
/// Recomputes whether indexing threads should be stalled and pushes the new
/// state into <c>stallControl</c>. Returns the computed stall flag.
/// Caller must hold this instance's monitor (asserted).
/// </summary>
private bool UpdateStallState()
{
    if (Debugging.AssertsEnabled)
    {
        Debugging.Assert(UninterruptableMonitor.IsEntered(this));
    }
    long limit = StallLimitBytes;
    // Block indexing threads only when the net byte count grows because
    // flushes are slow. With small RAM buffers and large documents the limit
    // can be reached without any ongoing flush, so never stall unless an
    // ongoing or pending flush (flushBytes) could actually free enough memory
    // to release the stall: activeBytes alone must still be under the limit.
    bool shouldStall = activeBytes + flushBytes > limit
        && activeBytes < limit
        && !closed;
    stallControl.UpdateStalled(shouldStall);
    return shouldStall;
}
/// <summary>
/// Returns the current live-docs bits for read-only use (may be <c>null</c>).
/// Marks the bits as shared, so any later mutation must first go through the
/// copy-on-write path in <c>InitWritableLiveDocs()</c>. Caller must hold the
/// writer lock (asserted).
/// </summary>
public virtual IBits GetReadOnlyLiveDocs()
{
    UninterruptableMonitor.Enter(this);
    try
    {
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(UninterruptableMonitor.IsEntered(writer));
        }
        // Once handed out, the instance must never be mutated in place.
        liveDocsShared = true;
        return liveDocs;
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
/// <summary>
/// Checks out the flush-pending <c>DocumentsWriterPerThread</c> held by
/// <paramref name="perThread"/> for flushing: resets the thread state,
/// records the DWPT's byte usage in <c>flushingWriters</c> (so
/// <c>doAfterFlush</c> can reduce <c>flushBytes</c>), and decrements
/// <c>numPending</c>. Returns <c>null</c> if the thread state was never
/// initialized. Always refreshes the stall state before returning.
/// Caller must hold both the perThread lock and this instance's monitor.
/// </summary>
private DocumentsWriterPerThread InternalTryCheckOutForFlush(ThreadState perThread)
{
    if (Debugging.AssertsEnabled)
    {
        // LUCENENET specific - Since we need to mimic the unfair behavior of
        // ReentrantLock, we need to ensure that all threads that enter here hold the lock.
        Debugging.Assert(perThread.IsHeldByCurrentThread);
        Debugging.Assert(UninterruptableMonitor.IsEntered(this));
        Debugging.Assert(perThread.flushPending);
    }
    try
    {
        // LUCENENET specific - We removed the call to perThread.TryLock() and
        // the try-finally below as they are no longer needed.

        // We are pending so all memory is already moved to flushBytes
        if (perThread.IsInitialized)
        {
            if (Debugging.AssertsEnabled)
            {
                Debugging.Assert(perThread.IsHeldByCurrentThread);
            }
            DocumentsWriterPerThread dwpt;
            long bytes = perThread.bytesUsed; // do that before replace!
            dwpt = DocumentsWriterPerThreadPool.Reset(perThread, closed); // LUCENENET specific - made method static per CA1822
            if (Debugging.AssertsEnabled)
            {
                Debugging.Assert(!flushingWriters.ContainsKey(dwpt), "DWPT is already flushing");
            }
            // Record the flushing DWPT to reduce flushBytes in doAfterFlush
            flushingWriters[dwpt] = bytes;
            numPending--; // write access synced
            return (dwpt);
        }
        return (null);
    }
    finally
    {
        // Checking out a DWPT changes the active/flushing byte balance, so
        // the stall decision must be recomputed on every exit path.
        UpdateStallState();
    }
}
/// <summary>
/// Returns a reader for merge. This method applies field updates if there are
/// any and marks that this segment is currently merging.
/// </summary>
internal virtual SegmentReader GetReaderForMerge(IOContext context)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(UninterruptableMonitor.IsEntered(writer));
        }
        // must execute these two statements as atomic operation, otherwise we
        // could lose updates if e.g. another thread calls writeFieldUpdates in
        // between, or the updates are applied to the obtained reader, but then
        // re-applied in IW.commitMergedDeletes (unnecessary work and potential
        // bugs).
        isMerging = true;
        return (GetReader(context));
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
/// <summary>
/// Writes the given numeric/binary doc-values field updates to new
/// generation'd doc-values files in <paramref name="dir"/>, advances the
/// segment's fieldInfosGen, merges the updates into <c>mergingDVUpdates</c>
/// when this segment is currently merging, rebuilds the per-generation
/// updates-files map, checkpoints the writer, and reopens the reader (if any)
/// so it reflects the updates. On failure, partially created files are
/// deleted and only the next-write generation is advanced so a retry writes
/// to fresh files. Caller must hold the writer lock (asserted).
/// </summary>
public virtual void WriteFieldUpdates(Directory dir, DocValuesFieldUpdates.Container dvUpdates)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(UninterruptableMonitor.IsEntered(writer));
        }
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(dvUpdates.Any());
        }
        // Do this so we can delete any created files on
        // exception; this saves all codecs from having to do
        // it:
        TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
        FieldInfos fieldInfos = null;
        bool success = false;
        try
        {
            Codec codec = Info.Info.Codec;

            // reader could be null e.g. for a just merged segment (from
            // IndexWriter.commitMergedDeletes).
            SegmentReader reader = this.reader ?? new SegmentReader(Info, writer.Config.ReaderTermsIndexDivisor, IOContext.READ_ONCE);
            try
            {
                // clone FieldInfos so that we can update their dvGen separately from
                // the reader's infos and write them to a new fieldInfos_gen file
                FieldInfos.Builder builder = new FieldInfos.Builder(writer.globalFieldNumberMap);
                // cannot use builder.add(reader.getFieldInfos()) because it does not
                // clone FI.attributes as well FI.dvGen
                foreach (FieldInfo fi in reader.FieldInfos)
                {
                    FieldInfo clone = builder.Add(fi);
                    // copy the stuff FieldInfos.Builder doesn't copy
                    if (fi.Attributes != null)
                    {
                        foreach (KeyValuePair<string, string> e in fi.Attributes)
                        {
                            clone.PutAttribute(e.Key, e.Value);
                        }
                    }
                    clone.DocValuesGen = fi.DocValuesGen;
                }
                // create new fields or update existing ones to have NumericDV type
                foreach (string f in dvUpdates.numericDVUpdates.Keys)
                {
                    builder.AddOrUpdate(f, NumericDocValuesField.TYPE);
                }
                // create new fields or update existing ones to have BinaryDV type
                foreach (string f in dvUpdates.binaryDVUpdates.Keys)
                {
                    builder.AddOrUpdate(f, BinaryDocValuesField.TYPE);
                }
                fieldInfos = builder.Finish();
                long nextFieldInfosGen = Info.NextFieldInfosGen;
                // LUCENENET specific: We created the segments names wrong in
                // 4.8.0-beta00001 - 4.8.0-beta00015, so we added a switch to be
                // able to read these indexes in later versions. This logic as
                // well as an optimization on the first 100 segment values is
                // implemented in SegmentInfos.SegmentNumberToString().
                string segmentSuffix = SegmentInfos.SegmentNumberToString(nextFieldInfosGen);
                SegmentWriteState state = new SegmentWriteState(null, trackingDir, Info.Info, fieldInfos, writer.Config.TermIndexInterval, null, IOContext.DEFAULT, segmentSuffix);
                DocValuesFormat docValuesFormat = codec.DocValuesFormat;
                DocValuesConsumer fieldsConsumer = docValuesFormat.FieldsConsumer(state);
                bool fieldsConsumerSuccess = false;
                try
                {
                    // Apply the numeric doc-values updates field by field,
                    // stamping each field's dvGen with the new generation.
                    foreach (KeyValuePair<string, NumericDocValuesFieldUpdates> e in dvUpdates.numericDVUpdates)
                    {
                        string field = e.Key;
                        NumericDocValuesFieldUpdates fieldUpdates = e.Value;
                        FieldInfo fieldInfo = fieldInfos.FieldInfo(field);
                        if (Debugging.AssertsEnabled)
                        {
                            Debugging.Assert(fieldInfo != null);
                        }
                        fieldInfo.DocValuesGen = nextFieldInfosGen;
                        // write the numeric updates to a new gen'd docvalues file
                        fieldsConsumer.AddNumericField(fieldInfo, GetInt64Enumerable(reader, field, fieldUpdates));
                    }

                    // Apply the binary doc-values updates the same way.
                    foreach (KeyValuePair<string, BinaryDocValuesFieldUpdates> e in dvUpdates.binaryDVUpdates)
                    {
                        string field = e.Key;
                        BinaryDocValuesFieldUpdates dvFieldUpdates = e.Value;
                        FieldInfo fieldInfo = fieldInfos.FieldInfo(field);
                        if (Debugging.AssertsEnabled)
                        {
                            Debugging.Assert(fieldInfo != null);
                        }
                        fieldInfo.DocValuesGen = nextFieldInfosGen;
                        // write the binary updates to a new gen'd docvalues file
                        fieldsConsumer.AddBinaryField(fieldInfo, GetBytesRefEnumerable(reader, field, dvFieldUpdates));
                    }

                    codec.FieldInfosFormat.FieldInfosWriter.Write(trackingDir, Info.Info.Name, segmentSuffix, fieldInfos, IOContext.DEFAULT);
                    fieldsConsumerSuccess = true;
                }
                finally
                {
                    if (fieldsConsumerSuccess)
                    {
                        fieldsConsumer.Dispose();
                    }
                    else
                    {
                        // Dispose without masking the original exception.
                        IOUtils.DisposeWhileHandlingException(fieldsConsumer);
                    }
                }
            }
            finally
            {
                // Only dispose the reader if we opened a temporary one above.
                if (reader != this.reader)
                {
                    reader.Dispose();
                }
            }
            success = true;
        }
        finally
        {
            if (!success)
            {
                // Advance only the nextWriteDocValuesGen so that a 2nd
                // attempt to write will write to a new file
                Info.AdvanceNextWriteFieldInfosGen();
                // Delete any partially created file(s):
                foreach (string fileName in trackingDir.CreatedFiles)
                {
                    try
                    {
                        dir.DeleteFile(fileName);
                    }
                    catch (Exception t) when (t.IsThrowable())
                    {
                        // Ignore so we throw only the first exc
                    }
                }
            }
        }
        Info.AdvanceFieldInfosGen();
        // copy all the updates to mergingUpdates, so they can later be applied to the merged segment
        if (isMerging)
        {
            foreach (KeyValuePair<string, NumericDocValuesFieldUpdates> e in dvUpdates.numericDVUpdates)
            {
                if (!mergingDVUpdates.TryGetValue(e.Key, out DocValuesFieldUpdates updates))
                {
                    mergingDVUpdates[e.Key] = e.Value;
                }
                else
                {
                    updates.Merge(e.Value);
                }
            }
            foreach (KeyValuePair<string, BinaryDocValuesFieldUpdates> e in dvUpdates.binaryDVUpdates)
            {
                if (!mergingDVUpdates.TryGetValue(e.Key, out DocValuesFieldUpdates updates))
                {
                    mergingDVUpdates[e.Key] = e.Value;
                }
                else
                {
                    updates.Merge(e.Value);
                }
            }
        }
        // create a new map, keeping only the gens that are in use
        IDictionary<long, ISet<string>> genUpdatesFiles = Info.UpdatesFiles;
        IDictionary<long, ISet<string>> newGenUpdatesFiles = new Dictionary<long, ISet<string>>();
        long fieldInfosGen = Info.FieldInfosGen;
        foreach (FieldInfo fi in fieldInfos)
        {
            long dvGen = fi.DocValuesGen;
            if (dvGen != -1 && !newGenUpdatesFiles.ContainsKey(dvGen))
            {
                // The just-written generation maps to the newly created files;
                // older generations keep their previously recorded file sets.
                if (dvGen == fieldInfosGen)
                {
                    newGenUpdatesFiles[fieldInfosGen] = trackingDir.CreatedFiles;
                }
                else
                {
                    newGenUpdatesFiles[dvGen] = genUpdatesFiles[dvGen];
                }
            }
        }
        Info.SetGenUpdatesFiles(newGenUpdatesFiles);
        // wrote new files, should checkpoint()
        writer.Checkpoint();

        // if there is a reader open, reopen it to reflect the updates
        if (reader != null)
        {
            SegmentReader newReader = new SegmentReader(Info, reader, liveDocs, Info.Info.DocCount - Info.DelCount - pendingDeleteCount);
            bool reopened = false;
            try
            {
                reader.DecRef();
                reader = newReader;
                reopened = true;
            }
            finally
            {
                if (!reopened)
                {
                    newReader.DecRef();
                }
            }
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
/// <summary>
/// Persists any pending deletes (the in-memory live-docs bits) to
/// <paramref name="dir"/> via the codec's live-docs format. Returns
/// <c>true</c> if new deletes were written, <c>false</c> if there were no
/// pending deletes. On success the segment's delGen and DelCount are advanced
/// and the pending counter reset; on failure only the next-write delGen is
/// advanced and partially written files are deleted, so a retry writes to a
/// new file. Caller must hold the writer lock (asserted).
/// </summary>
public virtual bool WriteLiveDocs(Directory dir)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(UninterruptableMonitor.IsEntered(writer));
        }
        if (pendingDeleteCount == 0)
        {
            return (false);
        }

        // We have new deletes
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(liveDocs.Length == Info.Info.DocCount);
        }

        // Do this so we can delete any created files on
        // exception; this saves all codecs from having to do
        // it:
        TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);

        // We can write directly to the actual name (vs to a
        // .tmp & renaming it) because the file is not live
        // until segments file is written:
        bool success = false;
        try
        {
            Codec codec = Info.Info.Codec;
            codec.LiveDocsFormat.WriteLiveDocs((IMutableBits)liveDocs, trackingDir, Info, pendingDeleteCount, IOContext.DEFAULT);
            success = true;
        }
        finally
        {
            if (!success)
            {
                // Advance only the nextWriteDelGen so that a 2nd
                // attempt to write will write to a new file
                Info.AdvanceNextWriteDelGen();
                // Delete any partially created file(s):
                foreach (string fileName in trackingDir.CreatedFiles)
                {
                    try
                    {
                        dir.DeleteFile(fileName);
                    }
                    catch (Exception t) when (t.IsThrowable())
                    {
                        // Ignore so we throw only the first exc
                    }
                }
            }
        }

        // If we hit an exc in the line above (eg disk full)
        // then info's delGen remains pointing to the previous
        // (successfully written) del docs:
        Info.AdvanceDelGen();
        Info.DelCount = Info.DelCount + pendingDeleteCount;
        pendingDeleteCount = 0;
        return (true);
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
/// <summary>
/// Drains the <see cref="IndexWriter"/>'s queue of pending merges, spawning
/// one <c>MergeThread</c> per merge. When the number of merge threads reaches
/// <c>maxMergeCount</c> while merges are still pending, this producer thread
/// waits on its own monitor (stalling segment creation) until merging catches
/// up. Must NOT be called while holding the writer's lock (asserted).
/// </summary>
public override void Merge(IndexWriter writer, MergeTrigger trigger, bool newMergesFound)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(!UninterruptableMonitor.IsEntered(writer));
        }
        this.m_writer = writer;
        InitMergeThreadPriority();
        m_dir = writer.Directory;

        // First, quickly run through the newly proposed merges
        // and add any orthogonal merges (ie a merge not
        // involving segments already pending to be merged) to
        // the queue. If we are way behind on merging, many of
        // these newly proposed merges will likely already be
        // registered.
        if (IsVerbose)
        {
            Message("now merge");
            Message(" index: " + writer.SegString());
        }

        // Iterate, pulling from the IndexWriter's queue of
        // pending merges, until it's empty:
        while (true)
        {
            long startStallTime = 0;
            while (writer.HasPendingMerges() && MergeThreadCount >= maxMergeCount)
            {
                // this means merging has fallen too far behind: we
                // have already created maxMergeCount threads, and
                // now there's at least one more merge pending.
                // Note that only maxThreadCount of
                // those created merge threads will actually be
                // running; the rest will be paused (see
                // updateMergeThreads). We stall this producer
                // thread to prevent creation of new segments,
                // until merging has caught up:
                startStallTime = J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond; // LUCENENET: Use NanoTime() rather than CurrentTimeMilliseconds() for more accurate/reliable results
                if (IsVerbose)
                {
                    Message(" too many merges; stalling...");
                }
                try
                {
                    // Woken by notify when a merge thread finishes.
                    UninterruptableMonitor.Wait(this);
                }
                catch (Exception ie) when (ie.IsInterruptedException())
                {
                    throw new Util.ThreadInterruptedException(ie);
                }
            }

            if (IsVerbose)
            {
                if (startStallTime != 0)
                {
                    Message(" stalled for " + ((J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond) - startStallTime) + " msec"); // LUCENENET: Use NanoTime() rather than CurrentTimeMilliseconds() for more accurate/reliable results
                }
            }

            MergePolicy.OneMerge merge = writer.NextMerge();
            if (merge == null)
            {
                if (IsVerbose)
                {
                    Message(" no more merges pending; now return");
                }
                return;
            }

            bool success = false;
            try
            {
                if (IsVerbose)
                {
                    Message(" consider merge " + writer.SegString(merge.Segments));
                }

                // OK to spawn a new merge thread to handle this
                // merge:
                MergeThread merger = GetMergeThread(writer, merge);
                m_mergeThreads.Add(merger);
                if (IsVerbose)
                {
                    Message(" launch new thread [" + merger.Name + "]");
                }

                merger.Start();

                // Must call this after starting the thread else
                // the new thread is removed from mergeThreads
                // (since it's not alive yet):
                UpdateMergeThreads();

                success = true;
            }
            finally
            {
                if (!success)
                {
                    // Release the merge back to the writer if the thread
                    // could not be started.
                    writer.MergeFinish(merge);
                }
            }
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
/// <summary>
/// Worker loop: repeatedly runs a fixed sequence of transactional and
/// interruptable operations until <c>finish</c> is set, verifying that an
/// interrupt surfaces as <c>Util.ThreadInterruptedException</c> (wrapping the
/// BCL <c>System.Threading.ThreadInterruptedException</c>) and never lands in
/// the middle of a transaction or while one of the locks is held.
/// </summary>
public override void Run()
{
    while (!finish)
    {
        try
        {
            TransactionalMethod();
            TransactionalMethod();
            InterruptableMethod();
            TransactionalMethod();
            InterruptableMethod();
            // None of these may throw System.Threading.ThreadInterruptedException
            Assert.IsFalse(UninterruptableMonitor.IsEntered(lock1));
            Assert.IsFalse(UninterruptableMonitor.IsEntered(lock2));
            Assert.IsFalse(UninterruptableMonitor.IsEntered(lock3));
            if (UninterruptableMonitor.TryEnter(lock1))
            {
                try
                {
                    Assert.IsTrue(UninterruptableMonitor.IsEntered(lock1));
                }
                finally
                {
                    UninterruptableMonitor.Exit(lock1);
                }
            }
            allowInterrupt = true;
        }
        catch (Util.ThreadInterruptedException interrupted)
        {
            // Success - we received the correct exception type
            Console.WriteLine("TEST: got interrupt");
            Console.WriteLine(GetToStringFrom(interrupted));
            Exception cause = interrupted.InnerException;
            Assert.IsTrue(cause is System.Threading.ThreadInterruptedException);
            // Make sure we didn't interrupt in the middle of a transaction
            Assert.IsFalse(transactionInProgress);
            if (finish)
            {
                break;
            }
        }
        catch (Exception unexpected) when (unexpected.IsThrowable())
        {
            Console.WriteLine("FAILED; unexpected exception");
            Console.WriteLine(GetToStringFrom(unexpected));
            // Make sure we didn't error in the middle of a transaction
            Assert.IsFalse(transactionInProgress);
            failed = true;
            break;
        }
    }
}