public override void Run()
{
    //#if !NETSTANDARD1_6
    //    try
    //    {
    //#endif
    Latch.Wait();
    //#if !NETSTANDARD1_6
    //    }
    //    catch (ThreadInterruptedException e) // LUCENENET NOTE: Senseless to catch and rethrow the same exception type
    //    {
    //        throw new ThreadInterruptedException("Thread Interrupted Exception", e);
    //    }
    //#endif
    int i = 0;
    while ((i = Index.GetAndIncrement()) < Ids.Length)
    {
        Term term = new Term("id", Ids[i].ToString());
        Queue.Add(term, Slice);
        Assert.IsTrue(Slice.IsTailItem(term));
        Slice.Apply(Deletes, BufferedUpdates.MAX_INT32);
    }
}
public override void Run()
{
    //#if FEATURE_THREAD_INTERRUPT
    //    try
    //    {
    //#endif
    latch.Wait();
    //#if FEATURE_THREAD_INTERRUPT
    //    }
    //    catch (ThreadInterruptedException e) // LUCENENET NOTE: Senseless to catch and rethrow the same exception type
    //    {
    //        throw new ThreadInterruptedException("Thread Interrupted Exception", e);
    //    }
    //#endif
    int i = 0;
    while ((i = index.GetAndIncrement()) < ids.Length)
    {
        Term term = new Term("id", ids[i].ToString());
        queue.Add(term, slice);
        Assert.IsTrue(slice.IsTailItem(term));
        slice.Apply(deletes, BufferedUpdates.MAX_INT32);
    }
}
public override void Run()
{
    latch.Wait(); // LUCENENET NOTE: No need to catch and rethrow the same exception type ThreadInterruptedException
    int i = 0;
    while ((i = index.GetAndIncrement()) < ids.Length)
    {
        Term term = new Term("id", ids[i].ToString());
        queue.Add(term, slice);
        Assert.IsTrue(slice.IsTailItem(term));
        slice.Apply(deletes, BufferedUpdates.MAX_INT32);
    }
}
public override void Run()
{
    try
    {
        Latch.Wait();
    }
    catch (ThreadInterruptedException e)
    {
        throw new ThreadInterruptedException("Thread Interrupted Exception", e);
    }
    int i = 0;
    while ((i = Index.GetAndIncrement()) < Ids.Length)
    {
        Term term = new Term("id", Ids[i].ToString());
        Queue.Add(term, Slice);
        Assert.IsTrue(Slice.IsTailItem(term));
        Slice.Apply(Deletes, BufferedUpdates.MAX_INT);
    }
}
public override void Run()
{
    try
    {
        latch.Wait();
    }
    catch (Exception ie) when (ie.IsInterruptedException())
    {
        throw new Util.ThreadInterruptedException(ie);
    }
    int i = 0;
    while ((i = index.GetAndIncrement()) < ids.Length)
    {
        Term term = new Term("id", ids[i].ToString());
        queue.Add(term, slice);
        Assert.IsTrue(slice.IsTailItem(term));
        slice.Apply(deletes, BufferedUpdates.MAX_INT32);
    }
}
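All of the Run() variants above follow the same coordination pattern: each worker blocks on a shared latch so all threads start at once, then claims indices from a shared atomic counter until the id array is exhausted. Below is a minimal, self-contained sketch of that pattern using only standard .NET primitives (CountdownEvent and Interlocked standing in for the test's Latch and AtomicInt32); it is an illustrative assumption, not part of the Lucene.NET sources.

using System;
using System.Threading;

public static class LatchedWorkerSketch
{
    private static readonly int[] ids = { 1, 2, 3, 4, 5, 6, 7, 8 };
    private static int nextIndex = -1; // shared cursor over ids; Increment returns 0, 1, 2, ...
    private static readonly CountdownEvent latch = new CountdownEvent(1);

    public static void Main()
    {
        Thread[] workers = new Thread[4];
        for (int t = 0; t < workers.Length; t++)
        {
            workers[t] = new Thread(Run);
            workers[t].Start();
        }

        latch.Signal(); // release every waiting worker at once
        foreach (Thread worker in workers)
        {
            worker.Join();
        }
    }

    private static void Run()
    {
        latch.Wait(); // block until the driver releases all workers
        int i;
        while ((i = Interlocked.Increment(ref nextIndex)) < ids.Length)
        {
            // In the tests above, this is where the delete term is built,
            // added to the queue slice, and applied to the buffered updates.
            Console.WriteLine("worker handled id " + ids[i]);
        }
    }
}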
public virtual int UpdateDocuments(IEnumerable<IEnumerable<IIndexableField>> docs, Analyzer analyzer, Term delTerm)
{
    if (Debugging.AssertsEnabled)
    {
        Debugging.Assert(TestPoint("DocumentsWriterPerThread addDocuments start"));
        Debugging.Assert(deleteQueue != null);
    }
    docState.analyzer = analyzer;
    if (INFO_VERBOSE && infoStream.IsEnabled("DWPT"))
    {
        infoStream.Message("DWPT", Thread.CurrentThread.Name + " update delTerm=" + delTerm + " docID=" + docState.docID + " seg=" + segmentInfo.Name);
    }
    int docCount = 0;
    bool allDocsIndexed = false;
    try
    {
        foreach (IEnumerable<IIndexableField> doc in docs)
        {
            docState.doc = doc;
            docState.docID = numDocsInRAM;
            docCount++;
            bool success = false;
            try
            {
                consumer.ProcessDocument(fieldInfos);
                success = true;
            }
            finally
            {
                if (!success)
                {
                    // An exc is being thrown...
                    if (!aborting)
                    {
                        // Incr here because finishDocument will not
                        // be called (because an exc is being thrown):
                        numDocsInRAM++;
                    }
                    else
                    {
                        Abort(filesToDelete);
                    }
                }
            }
            success = false;
            try
            {
                consumer.FinishDocument();
                success = true;
            }
            finally
            {
                if (!success)
                {
                    Abort(filesToDelete);
                }
            }
            FinishDocument(null);
        }
        allDocsIndexed = true;

        // Apply delTerm only after all indexing has
        // succeeded, but apply it only to docs prior to when
        // this batch started:
        if (delTerm != null)
        {
            deleteQueue.Add(delTerm, deleteSlice);
            if (Debugging.AssertsEnabled)
            {
                Debugging.Assert(deleteSlice.IsTailItem(delTerm), "expected the delete term as the tail item");
            }
            deleteSlice.Apply(pendingUpdates, numDocsInRAM - docCount);
        }
    }
    finally
    {
        if (!allDocsIndexed && !aborting)
        {
            // the iterator threw an exception that is not aborting
            // go and mark all docs from this block as deleted
            int docID = numDocsInRAM - 1;
            int endDocID = docID - docCount;
            while (docID > endDocID)
            {
                DeleteDocID(docID);
                docID--;
            }
        }
        docState.Clear();
    }
    return docCount;
}