private void Open(Random random)
{
    UninterruptableMonitor.Enter(syncLock);
    try
    {
        Stream @is = null;
        bool needSkip = true, isExternal = false;
        long size = 0L, seekTo = 0L;
        try
        {
            // LUCENENET: We have embedded the default file, so if that filename is passed,
            // open the local resource instead of an external file.
            if (path == LuceneTestCase.DEFAULT_LINE_DOCS_FILE)
            {
                @is = this.GetType().FindAndGetManifestResourceStream(path);
            }
            else
            {
                isExternal = true;
            }
        }
        catch (Exception)
        {
            isExternal = true;
        }
        if (isExternal)
        {
            // if it's not in the classpath, we load it as an absolute filesystem path (e.g. Hudson's home dir)
            FileInfo file = new FileInfo(path);
            size = file.Length;
            if (path.EndsWith(".gz", StringComparison.Ordinal))
            {
                // if it is a gzip file, we need to use InputStream and slowly skipTo:
                @is = new FileStream(file.FullName, FileMode.Open, FileAccess.Read, FileShare.Read);
            }
            else
            {
                // optimized seek using RandomAccessFile:
                seekTo = RandomSeekPos(random, size);
                if (LuceneTestCase.Verbose)
                {
                    Console.WriteLine($"TEST: LineFileDocs: file seek to fp={seekTo} on open");
                }
                @is = new BufferedStream(new FileStream(path, FileMode.Open, FileAccess.Read, FileShare.Read) { Position = seekTo });
                needSkip = false;
            }
        }
        else
        {
            // if the file comes from the classpath:
            size = @is.Length; // available();
        }

        if (path.EndsWith(".gz", StringComparison.Ordinal))
        {
            @is = PrepareGZipStream(@is);
            // guesstimate:
            size = (long)(size * 2.8);
        }

        // If we only have an InputStream, we need to seek now,
        // but this seek is a scan, so very inefficient!!!
        if (needSkip)
        {
            seekTo = RandomSeekPos(random, size);
            if (LuceneTestCase.Verbose)
            {
                Console.WriteLine($"TEST: LineFileDocs: stream skip to fp={seekTo} on open");
            }
            @is.Position = seekTo;
        }

        // if we seeked somewhere, read until newline char
        if (seekTo > 0L)
        {
            int b;
            do
            {
                b = @is.ReadByte();
            } while (b >= 0 && b != 13 && b != 10);
        }

        reader = new StreamReader(@is, Encoding.UTF8, detectEncodingFromByteOrderMarks: false, bufferSize: BUFFER_SIZE);

        if (seekTo > 0L)
        {
            // read one more line, to make sure we are not inside a Windows linebreak (\r\n):
            reader.ReadLine();
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(syncLock);
    }
}
// LUCENENET: Called in one place, but since there is no implementation it is just wasted CPU
//internal void Recycle(DocumentsWriterPerThread dwpt)
//{
//    // don't recycle DWPT by default
//}

// you cannot subclass this without being in o.a.l.index package anyway, so
// the class is already pkg-private... fix me: see LUCENE-4013
public ThreadState GetAndLock(/* Thread requestingThread, DocumentsWriter documentsWriter // LUCENENET: Not referenced */)
{
    ThreadState threadState = null;
    UninterruptableMonitor.Enter(this);
    try
    {
        for (; ; )
        {
            if (freeCount > 0)
            {
                // Important that we are LIFO here! This way if number of concurrent indexing threads was once high,
                // but has now reduced, we only use a limited number of thread states:
                threadState = freeList[freeCount - 1];

                if (threadState.dwpt is null)
                {
                    // This thread-state is not initialized, e.g. it
                    // was just flushed. See if we can instead find
                    // another free thread state that already has docs
                    // indexed. This way if incoming thread concurrency
                    // has decreased, we don't leave docs
                    // indefinitely buffered, tying up RAM. This
                    // will instead get those thread states flushed,
                    // freeing up RAM for larger segment flushes:
                    for (int i = 0; i < freeCount; i++)
                    {
                        if (freeList[i].dwpt != null)
                        {
                            // Use this one instead, and swap it with
                            // the un-initialized one:
                            ThreadState ts = freeList[i];
                            freeList[i] = threadState;
                            threadState = ts;
                            break;
                        }
                    }
                }
                freeCount--;
                break;
            }
            else if (NumThreadStatesActive < threadStates.Length)
            {
                // ThreadState is already locked before return by this method:
                return NewThreadState();
            }
            else
            {
                // Wait until a thread state frees up:
                try
                {
                    UninterruptableMonitor.Wait(this);
                }
                catch (Exception ie) when (ie.IsInterruptedException())
                {
                    throw new Util.ThreadInterruptedException(ie);
                }
            }
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }

    // This could take time, e.g. if the threadState is [briefly] checked for flushing:
    threadState.Lock();
    return threadState;
}
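// LUCENENET illustrative usage sketch (an assumption, not part of the original source).
// GetAndLock returns a ThreadState that is already locked, so a caller must pair the
// call with Unlock() in a finally block; the pool/field names here are hypothetical:
//
//   ThreadState perThread = perThreadPool.GetAndLock();
//   try
//   {
//       // ... index documents via perThread.dwpt ...
//   }
//   finally
//   {
//       perThread.Unlock();
//   }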
void ICollection.CopyTo(Array array, int index)
{
    if (array is null)
    {
        throw new ArgumentNullException(nameof(array));
    }
    if (array.Rank != 1)
    {
        throw new ArgumentException("Only single dimensional arrays are supported for the requested action.", nameof(array));
    }
    //throw new ArgumentException(SR.Arg_RankMultiDimNotSupported, nameof(array));
    if (array.GetLowerBound(0) != 0)
    {
        throw new ArgumentException("The lower bound of target array must be zero.", nameof(array));
    }
    //throw new ArgumentException(SR.Arg_NonZeroLowerBound, nameof(array));
    if (index < 0)
    {
        throw new ArgumentOutOfRangeException(nameof(index), index, "Non-negative number required.");
    }
    //throw new ArgumentOutOfRangeException(nameof(index), index, SR.ArgumentOutOfRange_NeedNonNegNum);
    if (array.Length - index < Count)
    {
        throw new ArgumentException("Destination array is not long enough to copy all the items in the collection. Check array index and length.");
    }
    //throw new ArgumentException(SR.Arg_ArrayPlusOffTooSmall);

#pragma warning disable IDE0019 // Use pattern matching
    T[]? tarray = array as T[];
#pragma warning restore IDE0019 // Use pattern matching
    if (tarray != null)
    {
        CopyTo(tarray, index);
    }
    else
    {
#pragma warning disable IDE0019 // Use pattern matching
        object?[]? objects = array as object[];
#pragma warning restore IDE0019 // Use pattern matching
        if (objects is null)
        {
            throw new ArgumentException("Target array type is not compatible with the type of items in the collection.", nameof(array));
            //throw new ArgumentException(SR.Argument_InvalidArrayType, nameof(array));
        }
        try
        {
            UninterruptableMonitor.Enter(SyncRoot);
            try
            {
                foreach (var item in set)
                {
                    objects[index++] = item;
                }
            }
            finally
            {
                UninterruptableMonitor.Exit(SyncRoot);
            }
        }
        catch (ArrayTypeMismatchException)
        {
            throw new ArgumentException("Target array type is not compatible with the type of items in the collection.", nameof(array));
            //throw new ArgumentException(SR.Argument_InvalidArrayType, nameof(array));
        }
    }
}
internal void MarkForFullFlush()
{
    DocumentsWriterDeleteQueue flushingQueue;
    UninterruptableMonitor.Enter(this);
    try
    {
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(!fullFlush, "called DWFC#markForFullFlush() while full flush is still running");
            Debugging.Assert(fullFlushBuffer.Count == 0, "full flush buffer should be empty: {0}", fullFlushBuffer);
        }
        fullFlush = true;
        flushingQueue = documentsWriter.deleteQueue;
        // Set a new delete queue - all subsequent DWPT will use this queue until
        // we do another full flush
        DocumentsWriterDeleteQueue newQueue = new DocumentsWriterDeleteQueue(flushingQueue.generation + 1);
        documentsWriter.deleteQueue = newQueue;
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }

    int limit = perThreadPool.NumThreadStatesActive;
    for (int i = 0; i < limit; i++)
    {
        ThreadState next = perThreadPool.GetThreadState(i);
        next.@Lock();
        try
        {
            if (!next.IsInitialized)
            {
                if (closed && next.IsActive)
                {
                    DocumentsWriterPerThreadPool.DeactivateThreadState(next); // LUCENENET specific - made method static per CA1822
                }
                continue;
            }
            if (Debugging.AssertsEnabled)
            {
                Debugging.Assert(next.dwpt.deleteQueue == flushingQueue || next.dwpt.deleteQueue == documentsWriter.deleteQueue,
                    " flushingQueue: {0} currentqueue: {1} perThread queue: {2} numDocsInRam: {3}",
                    flushingQueue, documentsWriter.deleteQueue, next.dwpt.deleteQueue, next.dwpt.NumDocsInRAM);
            }
            if (next.dwpt.deleteQueue != flushingQueue)
            {
                // this one is already a new DWPT
                continue;
            }
            AddFlushableState(next);
        }
        finally
        {
            next.Unlock();
        }
    }

    UninterruptableMonitor.Enter(this);
    try
    {
        /* Make sure that any DWPT that were concurrently marked as pending and
         * moved to blocked are moved over to the flushQueue. There is a chance
         * that this happens since we mark DWPT for full flush without blocking
         * indexing. */
        PruneBlockedQueue(flushingQueue);
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(AssertBlockedFlushes(documentsWriter.deleteQueue));
        }
        //FlushQueue.AddAll(FullFlushBuffer);
        foreach (var dwpt in fullFlushBuffer)
        {
            flushQueue.Enqueue(dwpt);
        }
        fullFlushBuffer.Clear();
        UpdateStallState();
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
    if (Debugging.AssertsEnabled)
    {
        Debugging.Assert(AssertActiveDeleteQueue(documentsWriter.deleteQueue));
    }
}
private bool AddInternal(T item, int hashcode, bool acquireLock)
{
    while (true)
    {
        var tables = _tables;

        GetBucketAndLockNo(hashcode, out int bucketNo, out int lockNo, tables.Buckets.Length, tables.Locks.Length);

        var resizeDesired = false;
        var lockTaken = false;
        try
        {
            if (acquireLock)
            {
                UninterruptableMonitor.Enter(tables.Locks[lockNo], ref lockTaken);
            }

            // If the table just got resized, we may not be holding the right lock, and must retry.
            // This should be a rare occurrence.
            if (tables != _tables)
            {
                continue;
            }

            // Try to find this item in the bucket
            Node previous = null;
            for (var current = tables.Buckets[bucketNo]; current != null; current = current.Next)
            {
                Debug.Assert((previous is null && current == tables.Buckets[bucketNo]) || previous.Next == current);
                if (hashcode == current.Hashcode && _comparer.Equals(current.Item, item))
                {
                    return false;
                }
                previous = current;
            }

            // The item was not found in the bucket. Insert the new item.
            Volatile.Write(ref tables.Buckets[bucketNo], new Node(item, hashcode, tables.Buckets[bucketNo]));
            checked
            {
                tables.CountPerLock[lockNo]++;
            }

            //
            // If the number of elements guarded by this lock has exceeded the budget, resize the bucket table.
            // It is also possible that GrowTable will increase the budget but won't resize the bucket table.
            // That happens if the bucket table is found to be poorly utilized due to a bad hash function.
            //
            if (tables.CountPerLock[lockNo] > _budget)
            {
                resizeDesired = true;
            }
        }
        finally
        {
            if (lockTaken)
            {
                UninterruptableMonitor.Exit(tables.Locks[lockNo]);
            }
        }

        //
        // The fact that we got here means that we just performed an insertion. If necessary, we will grow the table.
        //
        // Concurrency notes:
        // - Notice that we are not holding any locks when calling GrowTable. This is necessary to prevent deadlocks.
        // - As a result, it is possible that GrowTable will be called unnecessarily. But, GrowTable will obtain lock 0
        //   and then verify that the table we passed to it as the argument is still the current table.
        //
        if (resizeDesired)
        {
            GrowTable(tables);
        }

        return true;
    }
}
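// LUCENENET illustrative sketch (an assumption, not the original helper): the lock
// striping in AddInternal relies on mapping a hash code to a bucket, and a bucket to
// the lock that guards it. Following the pattern used by .NET's ConcurrentDictionary,
// such a mapping might look like the method below; threads that hash to buckets in
// different stripes acquire different locks and so never contend.
private static void GetBucketAndLockNoSketch(int hashcode, out int bucketNo, out int lockNo, int bucketCount, int lockCount)
{
    bucketNo = (hashcode & 0x7fffffff) % bucketCount; // strip the sign bit, then select a bucket
    lockNo = bucketNo % lockCount;                    // each lock guards a fixed stripe of buckets
}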
public override void Run()
{
    // TODO: maybe use private thread ticktock timer, in
    // case clock shift messes up nanoTime?
    // LUCENENET NOTE: Time.NanoTime() is not affected by clock changes.
    long lastReopenStartNS = Time.NanoTime();
    //System.out.println("reopen: start");
    while (!finish)
    {
        // TODO: try to guesstimate how long reopen might
        // take based on past data?

        // Loop until we've waited long enough before the
        // next reopen:
        while (!finish)
        {
            try
            {
                // Need lock before finding out if we have anyone waiting
                bool hasWaiting;
                UninterruptableMonitor.Enter(this);
                try
                {
                    // True if we have someone waiting for reopened searcher:
                    hasWaiting = waitingGen > searchingGen;
                }
                finally
                {
                    UninterruptableMonitor.Exit(this);
                }
                long nextReopenStartNS = lastReopenStartNS + (hasWaiting ? targetMinStaleNS : targetMaxStaleNS);
                long sleepNS = nextReopenStartNS - Time.NanoTime();
                if (sleepNS > 0)
                {
                    reopenCond.WaitOne(TimeSpan.FromMilliseconds(sleepNS / Time.MillisecondsPerNanosecond)); // Convert NS to MS
                }
                else
                {
                    break;
                }
            }
            catch (Exception ie) when (ie.IsInterruptedException())
            {
                Thread.CurrentThread.Interrupt();
                return;
            }
        }

        if (finish)
        {
            break;
        }

        lastReopenStartNS = Time.NanoTime();
        // Save the gen as of when we started the reopen; the
        // listener (HandleRefresh above) copies this to
        // searchingGen once the reopen completes:
        refreshStartGen.Value = writer.GetAndIncrementGeneration();
        try
        {
            manager.MaybeRefreshBlocking();
        }
        catch (Exception ioe) when (ioe.IsIOException())
        {
            throw RuntimeException.Create(ioe);
        }
    }
}
private int InnerPurge(IndexWriter writer)
{
    if (Debugging.AssertsEnabled)
    {
        Debugging.Assert(purgeLock.IsHeldByCurrentThread);
    }
    int numPurged = 0;
    while (true)
    {
        FlushTicket head;
        bool canPublish;
        UninterruptableMonitor.Enter(this);
        try
        {
            head = queue.Count <= 0 ? null : queue.Peek();
            canPublish = head != null && head.CanPublish; // do this synced
        }
        finally
        {
            UninterruptableMonitor.Exit(this);
        }
        if (canPublish)
        {
            numPurged++;
            try
            {
                /*
                 * If we block on publish -> lock IW -> lock BufferedDeletes we don't block
                 * concurrent segment flushes just because they want to append to the queue.
                 * The downside is that we need to force a purge on fullFlush since there could
                 * be a ticket still in the queue.
                 */
                head.Publish(writer);
            }
            finally
            {
                UninterruptableMonitor.Enter(this);
                try
                {
                    // finally remove the published ticket from the queue
                    FlushTicket poll = queue.Dequeue();
                    ticketCount.DecrementAndGet();
                    if (Debugging.AssertsEnabled)
                    {
                        Debugging.Assert(poll == head);
                    }
                }
                finally
                {
                    UninterruptableMonitor.Exit(this);
                }
            }
        }
        else
        {
            break;
        }
    }
    return numPurged;
}
internal void Persist()
{
    UninterruptableMonitor.Enter(this);
    try
    {
        string fileName = SNAPSHOTS_PREFIX + nextWriteGen;
        IndexOutput @out = dir.CreateOutput(fileName, IOContext.DEFAULT);
        bool success = false;
        try
        {
            CodecUtil.WriteHeader(@out, CODEC_NAME, VERSION_CURRENT);
            @out.WriteVInt32(m_refCounts.Count);
            foreach (KeyValuePair<long, int> ent in m_refCounts)
            {
                @out.WriteVInt64(ent.Key);
                @out.WriteVInt32(ent.Value);
            }
            success = true;
        }
        finally
        {
            if (!success)
            {
                IOUtils.DisposeWhileHandlingException(@out);
                try
                {
                    dir.DeleteFile(fileName);
                }
                catch (Exception e) when (e.IsException())
                {
                    // Suppress so we keep throwing original exception
                }
            }
            else
            {
                IOUtils.Dispose(@out);
            }
        }

        dir.Sync(/*Collections.singletonList(*/ new[] { fileName } /*)*/);

        if (nextWriteGen > 0)
        {
            string lastSaveFile = SNAPSHOTS_PREFIX + (nextWriteGen - 1);
            try
            {
                dir.DeleteFile(lastSaveFile);
            }
            catch (Exception ioe) when (ioe.IsIOException())
            {
                // OK: likely it didn't exist
            }
        }

        nextWriteGen++;
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
public override void EndElement(string @namespace, string simple, string qualified)
{
    int elemType = GetElementType(qualified);
    switch (elemType)
    {
        case PAGE:
            // the body must not be null, and either we are keeping image docs or the
            // title does not start with Image:
            if (body != null && (outerInstance.keepImages || !title.StartsWith("Image:", StringComparison.Ordinal)))
            {
                string[] tmpTuple = new string[LENGTH];
                tmpTuple[TITLE] = title.Replace('\t', ' ');
                tmpTuple[DATE] = time.Replace('\t', ' ');
                tmpTuple[BODY] = Regex.Replace(body, "[\t\n]", " ");
                tmpTuple[ID] = id;
                UninterruptableMonitor.Enter(this);
                try
                {
                    while (tuple != null && !stopped)
                    {
                        try
                        {
                            UninterruptableMonitor.Wait(this); //wait();
                        }
                        catch (System.Threading.ThreadInterruptedException ie)
                        {
                            throw new Util.ThreadInterruptedException(ie);
                        }
                    }
                    tuple = tmpTuple;
                    UninterruptableMonitor.Pulse(this); //notify();
                }
                finally
                {
                    UninterruptableMonitor.Exit(this);
                }
            }
            break;
        case BODY:
            body = contents.ToString();
            // workaround because startsWith (in the original Java) doesn't have an ignore-case option;
            // lower-case at least the first 10 chars.
            string startsWith = body.Substring(0, Math.Min(10, contents.Length)).ToLowerInvariant();
            if (startsWith.StartsWith("#redirect", StringComparison.Ordinal))
            {
                body = null;
            }
            break;
        case DATE:
            time = Time(contents.ToString());
            break;
        case TITLE:
            title = contents.ToString();
            break;
        case ID:
            // the doc id is the first one in the page. All other ids after that one can be ignored according to the schema
            if (id == null)
            {
                id = contents.ToString();
            }
            break;
        default:
            // this element should be discarded.
            break;
    }
}
public void Run()
{
    try
    {
        Sax.IXMLReader reader = new TagSoup.Parser(); //XMLReaderFactory.createXMLReader();
        reader.ContentHandler = this;
        reader.ErrorHandler = this;
        while (!stopped)
        {
            Stream localFileIS = outerInstance.@is;
            if (localFileIS != null) // null means fileIS was closed on us
            {
                try
                {
                    // To work around a bug in XERCES (XERCESJ-1257), we assume the XML is always UTF8, so we simply provide reader.
                    reader.Parse(new InputSource(IOUtils.GetDecodingReader(localFileIS, Encoding.UTF8)));
                }
                catch (Exception ioe) when (ioe.IsIOException())
                {
                    UninterruptableMonitor.Enter(outerInstance);
                    try
                    {
                        if (localFileIS != outerInstance.@is)
                        {
                            // fileIS was closed on us, so, just fall through
                        }
                        else
                        {
                            // Exception is real
                            throw; // LUCENENET: CA2200: Rethrow to preserve stack details (https://docs.microsoft.com/en-us/visualstudio/code-quality/ca2200-rethrow-to-preserve-stack-details)
                        }
                    }
                    finally
                    {
                        UninterruptableMonitor.Exit(outerInstance);
                    }
                }
            }
            UninterruptableMonitor.Enter(this);
            try
            {
                if (stopped || !outerInstance.m_forever)
                {
                    nmde = new NoMoreDataException();
                    UninterruptableMonitor.Pulse(this); //notify();
                    return;
                }
                else if (localFileIS == outerInstance.@is)
                {
                    // If file is not already re-opened then re-open it now
                    outerInstance.@is = outerInstance.OpenInputStream();
                }
            }
            finally
            {
                UninterruptableMonitor.Exit(this);
            }
        }
    }
    catch (SAXException sae)
    {
        throw RuntimeException.Create(sae);
    }
    catch (Exception ioe) when (ioe.IsIOException())
    {
        throw RuntimeException.Create(ioe);
    }
    finally
    {
        UninterruptableMonitor.Enter(this);
        try
        {
            threadDone = true;
            UninterruptableMonitor.Pulse(this); //Notify();
        }
        finally
        {
            UninterruptableMonitor.Exit(this);
        }
    }
}
public override NumericDocValues GetNumeric(FieldInfo field)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        if (!numericInstances.TryGetValue(field.Number, out NumericDocValues instance))
        {
            string fileName = IndexFileNames.SegmentFileName(state.SegmentInfo.Name + "_" + Convert.ToString(field.Number, CultureInfo.InvariantCulture), segmentSuffix, "dat");
            IndexInput input = dir.OpenInput(fileName, state.Context);
            bool success = false;
            try
            {
                var type = field.GetAttribute(legacyKey).ToLegacyDocValuesType();

                //switch (Enum.Parse(typeof(LegacyDocValuesType), field.GetAttribute(LegacyKey)))
                //{
                if (type == LegacyDocValuesType.VAR_INTS)
                {
                    instance = LoadVarInt32sField(/* field, // LUCENENET: Never read */ input);
                }
                else if (type == LegacyDocValuesType.FIXED_INTS_8)
                {
                    instance = LoadByteField(/* field, // LUCENENET: Never read */ input);
                }
                else if (type == LegacyDocValuesType.FIXED_INTS_16)
                {
                    instance = LoadInt16Field(/* field, // LUCENENET: Never read */ input);
                }
                else if (type == LegacyDocValuesType.FIXED_INTS_32)
                {
                    instance = LoadInt32Field(/* field, // LUCENENET: Never read */ input);
                }
                else if (type == LegacyDocValuesType.FIXED_INTS_64)
                {
                    instance = LoadInt64Field(/* field, // LUCENENET: Never read */ input);
                }
                else if (type == LegacyDocValuesType.FLOAT_32)
                {
                    instance = LoadSingleField(/* field, // LUCENENET: Never read */ input);
                }
                else if (type == LegacyDocValuesType.FLOAT_64)
                {
                    instance = LoadDoubleField(/* field, // LUCENENET: Never read */ input);
                }
                else
                {
                    throw AssertionError.Create();
                }

                CodecUtil.CheckEOF(input);
                success = true;
            }
            finally
            {
                if (success)
                {
                    IOUtils.Dispose(input);
                }
                else
                {
                    IOUtils.DisposeWhileHandlingException(input);
                }
            }
            numericInstances[field.Number] = instance;
        }
        return instance;
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
protected override IndexSearcher GetCurrentSearcher()
{
    if (Random.Next(10) == 7)
    {
        // NOTE: not best practice to call maybeReopen
        // synchronous to your search threads, but still we
        // test as apps will presumably do this for
        // simplicity:
        if (mgr.MaybeRefresh())
        {
            lifetimeMGR.Prune(pruner);
        }
    }

    IndexSearcher s = null;

    UninterruptableMonitor.Enter(pastSearchers);
    try
    {
        while (pastSearchers.Count != 0 && Random.NextDouble() < 0.25)
        {
            // 1/4 of the time pull an old searcher, ie, simulate
            // a user doing a follow-on action on a previous
            // search (drilling down/up, clicking next/prev page,
            // etc.)
            long token = pastSearchers[Random.Next(pastSearchers.Count)];
            s = lifetimeMGR.Acquire(token);
            if (s is null)
            {
                // Searcher was pruned
                pastSearchers.Remove(token);
            }
            else
            {
                break;
            }
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(pastSearchers);
    }

    if (s is null)
    {
        s = mgr.Acquire();
        if (s.IndexReader.NumDocs != 0)
        {
            long token = lifetimeMGR.Record(s);
            UninterruptableMonitor.Enter(pastSearchers);
            try
            {
                if (!pastSearchers.Contains(token))
                {
                    pastSearchers.Add(token);
                }
            }
            finally
            {
                UninterruptableMonitor.Exit(pastSearchers);
            }
        }
    }

    return s;
}
/// <summary>
/// Reads the snapshots information from the given <see cref="Directory"/>. This
/// method can be used if the snapshots information is needed; however, you
/// cannot instantiate the deletion policy (because e.g., some other process
/// keeps a lock on the snapshots directory).
/// </summary>
private void LoadPriorSnapshots()
{
    UninterruptableMonitor.Enter(this);
    try
    {
        long genLoaded = -1;
        Exception ioe = null; // LUCENENET: No need to cast to IOException
        IList<string> snapshotFiles = new JCG.List<string>();
        foreach (string file in dir.ListAll())
        {
            if (file.StartsWith(SNAPSHOTS_PREFIX, StringComparison.Ordinal))
            {
                // LUCENENET: Optimized to not allocate a substring during the parse
                long gen = Long.Parse(file, SNAPSHOTS_PREFIX.Length, file.Length - SNAPSHOTS_PREFIX.Length, radix: 10);
                if (genLoaded == -1 || gen > genLoaded)
                {
                    snapshotFiles.Add(file);
                    IDictionary<long, int> m = new Dictionary<long, int>();
                    IndexInput @in = dir.OpenInput(file, IOContext.DEFAULT);
                    try
                    {
                        CodecUtil.CheckHeader(@in, CODEC_NAME, VERSION_START, VERSION_START);
                        int count = @in.ReadVInt32();
                        for (int i = 0; i < count; i++)
                        {
                            long commitGen = @in.ReadVInt64();
                            int refCount = @in.ReadVInt32();
                            m[commitGen] = refCount;
                        }
                    }
                    catch (Exception ioe2) when (ioe2.IsIOException())
                    {
                        // Save first exception & throw in the end
                        if (ioe is null)
                        {
                            ioe = ioe2;
                        }
                    }
                    finally
                    {
                        @in.Dispose();
                    }

                    genLoaded = gen;
                    m_refCounts.Clear();
                    m_refCounts.PutAll(m);
                }
            }
        }

        if (genLoaded == -1)
        {
            // Nothing was loaded...
            if (ioe != null)
            {
                // ... not for lack of trying:
                ExceptionDispatchInfo.Capture(ioe).Throw(); // LUCENENET: Rethrow to preserve stack details from the original throw
            }
        }
        else
        {
            if (snapshotFiles.Count > 1)
            {
                // Remove any broken / old snapshot files:
                string curFileName = SNAPSHOTS_PREFIX + genLoaded;
                foreach (string file in snapshotFiles)
                {
                    if (!curFileName.Equals(file, StringComparison.Ordinal))
                    {
                        dir.DeleteFile(file);
                    }
                }
            }
            nextWriteGen = 1 + genLoaded;
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
public override void Run()
{
    if (Verbose)
    {
        Console.WriteLine(Thread.CurrentThread.Name + ": launch search thread");
    }
    while (J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond < stopTimeMS) // LUCENENET: Use NanoTime() rather than CurrentTimeMilliseconds() for more accurate/reliable results
    {
        try
        {
            IndexSearcher s = outerInstance.GetCurrentSearcher();
            try
            {
                // Verify 1) IW is correctly setting
                // diagnostics, and 2) segment warming for
                // merged segments is actually happening:
                foreach (AtomicReaderContext sub in s.IndexReader.Leaves)
                {
                    SegmentReader segReader = (SegmentReader)sub.Reader;
                    IDictionary<string, string> diagnostics = segReader.SegmentInfo.Info.Diagnostics;
                    assertNotNull(diagnostics);
                    diagnostics.TryGetValue("source", out string source);
                    assertNotNull(source);
                    if (source.Equals("merge", StringComparison.Ordinal))
                    {
#if !FEATURE_CONDITIONALWEAKTABLE_ADDORUPDATE
                        UninterruptableMonitor.Enter(outerInstance.warmedLock);
                        try
                        {
#endif
                            assertTrue("sub reader " + sub + " wasn't warmed: warmed=" + outerInstance.warmed + " diagnostics=" + diagnostics + " si=" + segReader.SegmentInfo,
                                // LUCENENET: ConditionalWeakTable doesn't have ContainsKey, so we normalize to TryGetValue
                                !outerInstance.m_assertMergedSegmentsWarmed || outerInstance.warmed.TryGetValue(segReader.core, out BooleanRef _));
#if !FEATURE_CONDITIONALWEAKTABLE_ADDORUPDATE
                        }
                        finally
                        {
                            UninterruptableMonitor.Exit(outerInstance.warmedLock);
                        }
#endif
                    }
                }
                if (s.IndexReader.NumDocs > 0)
                {
                    outerInstance.SmokeTestSearcher(s);
                    Fields fields = MultiFields.GetFields(s.IndexReader);
                    if (fields is null)
                    {
                        continue;
                    }
                    Terms terms = fields.GetTerms("body");
                    if (terms is null)
                    {
                        continue;
                    }
                    TermsEnum termsEnum = terms.GetEnumerator();
                    int seenTermCount = 0;
                    int shift;
                    int trigger;
                    if (totTermCount < 30)
                    {
                        shift = 0;
                        trigger = 1;
                    }
                    else
                    {
                        trigger = totTermCount / 30;
                        shift = Random.Next(trigger);
                    }
                    while (J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond < stopTimeMS) // LUCENENET: Use NanoTime() rather than CurrentTimeMilliseconds() for more accurate/reliable results
                    {
                        if (!termsEnum.MoveNext())
                        {
                            totTermCount.Value = seenTermCount;
                            break;
                        }
                        seenTermCount++;
                        // search 30 terms
                        if ((seenTermCount + shift) % trigger == 0)
                        {
                            //if (VERBOSE) {
                            //System.out.println(Thread.currentThread().getName() + " now search body:" + term.Utf8ToString());
                            //}
                            totHits.AddAndGet(outerInstance.RunQuery(s, new TermQuery(new Term("body", termsEnum.Term))));
                        }
                    }
                    //if (VERBOSE) {
                    //System.out.println(Thread.currentThread().getName() + ": search done");
                    //}
                }
            }
            finally
            {
                outerInstance.ReleaseSearcher(s);
            }
        }
        catch (Exception t) when (t.IsThrowable())
        {
            Console.WriteLine(Thread.CurrentThread.Name + ": hit exc");
            outerInstance.m_failed.Value = true;
            Console.WriteLine(t.ToString());
            throw RuntimeException.Create(t);
        }
    }
}
/*
 * FlushAllThreads is synced by IW fullFlushLock. Flushing all threads is a
 * two-stage operation; the caller must ensure (in try/finally) that finishFlush
 * is called after this method, to release the flush lock in DWFlushControl.
 */
internal bool FlushAllThreads(IndexWriter indexWriter)
{
    DocumentsWriterDeleteQueue flushingDeleteQueue;
    if (infoStream.IsEnabled("DW"))
    {
        infoStream.Message("DW", "startFullFlush");
    }

    UninterruptableMonitor.Enter(this);
    try
    {
        pendingChangesInCurrentFullFlush = AnyChanges();
        flushingDeleteQueue = deleteQueue;
        /* Cutover to a new delete queue. This must be synced on the flush control,
         * otherwise a new DWPT could sneak into the loop with an already flushing
         * delete queue */
        flushControl.MarkForFullFlush(); // swaps the delQueue synced on FlushControl
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(SetFlushingDeleteQueue(flushingDeleteQueue));
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
    if (Debugging.AssertsEnabled)
    {
        Debugging.Assert(currentFullFlushDelQueue != null);
        Debugging.Assert(currentFullFlushDelQueue != deleteQueue);
    }

    bool anythingFlushed = false;
    try
    {
        DocumentsWriterPerThread flushingDWPT;
        // Help out with flushing:
        while ((flushingDWPT = flushControl.NextPendingFlush()) != null)
        {
            anythingFlushed |= DoFlush(flushingDWPT);
        }
        // If a concurrent flush is still in flight wait for it
        flushControl.WaitForFlush();
        if (!anythingFlushed && flushingDeleteQueue.AnyChanges()) // apply deletes if we did not flush any document
        {
            if (infoStream.IsEnabled("DW"))
            {
                infoStream.Message("DW", Thread.CurrentThread.Name + ": flush naked frozen global deletes");
            }
            ticketQueue.AddDeletes(flushingDeleteQueue);
        }
        ticketQueue.ForcePurge(indexWriter);
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(!flushingDeleteQueue.AnyChanges() && !ticketQueue.HasTickets);
        }
    }
    finally
    {
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(flushingDeleteQueue == currentFullFlushDelQueue);
        }
    }
    return anythingFlushed;
}
/// <summary>
/// Reads a sequence of bytes from a <see cref="Stream"/> to the given <see cref="ByteBuffer"/>, starting at the given position.
/// The <paramref name="stream"/> must be both seekable and readable.
/// </summary>
/// <param name="stream">The stream to read.</param>
/// <param name="destination">The <see cref="ByteBuffer"/> to write to.</param>
/// <param name="position">The file position at which the transfer is to begin; must be non-negative.</param>
/// <returns>The number of bytes read, possibly zero.</returns>
/// <exception cref="ArgumentNullException"><paramref name="stream"/> or <paramref name="destination"/> is <c>null</c>.</exception>
/// <exception cref="NotSupportedException">
/// <paramref name="stream"/> is not readable.
/// <para/>
/// -or-
/// <para/>
/// <paramref name="stream"/> is not seekable.
/// </exception>
/// <exception cref="ArgumentOutOfRangeException">
/// <paramref name="position"/> is less than 0.
/// <para/>
/// -or-
/// <para/>
/// <paramref name="position"/> is greater than the <see cref="Stream.Length"/> of the stream.
/// </exception>
/// <exception cref="IOException">An I/O error occurs.</exception>
/// <exception cref="ObjectDisposedException"><paramref name="stream"/> has already been disposed.</exception>
/// <remarks>
/// This method is atomic when used by itself, but does not synchronize with the rest of the stream methods.
/// </remarks>
public static int Read(this Stream stream, ByteBuffer destination, long position)
{
    if (stream is null)
    {
        throw new ArgumentNullException(nameof(stream));
    }
    if (destination is null)
    {
        throw new ArgumentNullException(nameof(destination));
    }
    if (position < 0)
    {
        throw new ArgumentOutOfRangeException(nameof(position));
    }
    if (!stream.CanSeek)
    {
        throw new NotSupportedException("Stream does not support seeking.");
    }
    if (!stream.CanRead)
    {
        throw new NotSupportedException("Stream does not support reading.");
    }
    if (position > stream.Length)
    {
        return 0;
    }

    int read = 0;
    UninterruptableMonitor.Enter(readLock);
    try
    {
        long originalPosition = stream.Position;
        stream.Seek(position, SeekOrigin.Begin);
        if (destination.HasArray)
        {
            // If the buffer has an array, we can write to it directly and save
            // an extra copy operation.

            // Read from the stream
            read = stream.Read(destination.Array, destination.Position, destination.Remaining);
            destination.Position += read;
        }
        else
        {
            // If the buffer has no array, we must use a local buffer
            byte[] buffer = new byte[destination.Remaining];

            // Read from the stream
            read = stream.Read(buffer, 0, buffer.Length);

            // Write to the byte buffer
            destination.Put(buffer, 0, read);
        }

        // Per Java's FileChannel.Read(), we don't want to alter the position
        // of the stream, so we return it as it was originally.
        stream.Seek(originalPosition, SeekOrigin.Begin);
    }
    finally
    {
        UninterruptableMonitor.Exit(readLock);
    }
    return read;
}
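// LUCENENET illustrative usage sketch (an assumption, not part of the original source).
// Shows reading at an absolute offset without disturbing the stream's position; the
// file name, offset, and buffer size are hypothetical, and ByteBuffer is the J2N type
// this extension targets:
private static void ReadAtPositionExample()
{
    using Stream stream = new FileStream("data.bin", FileMode.Open, FileAccess.Read); // hypothetical file
    ByteBuffer buffer = ByteBuffer.Allocate(128); // room for up to 128 bytes
    long before = stream.Position;
    int read = stream.Read(buffer, position: 64); // read up to 128 bytes starting at offset 64
    Debug.Assert(stream.Position == before); // the extension restores the original position
}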
public virtual void Collect(int doc)
{
    //System.out.println("FP.collect doc=" + doc);

    // If orderedGroups != null we already have collected N groups and
    // can short circuit by comparing this document to the bottom group,
    // without having to find what group this document belongs to.

    // Even if this document belongs to a group in the top N, we'll know that
    // we don't have to update that group.

    // Downside: if the number of unique groups is very low, this is
    // wasted effort as we will most likely be updating an existing group.
    if (m_orderedGroups != null)
    {
        for (int compIDX = 0; ; compIDX++)
        {
            int c = reversed[compIDX] * comparers[compIDX].CompareBottom(doc);
            if (c < 0)
            {
                // Definitely not competitive. So don't even bother to continue
                return;
            }
            else if (c > 0)
            {
                // Definitely competitive.
                break;
            }
            else if (compIDX == compIDXEnd)
            {
                // Here c=0. If we're at the last comparer, this doc is not
                // competitive, since docs are visited in doc Id order, which means
                // this doc cannot compete with any other document in the queue.
                return;
            }
        }
    }

    // TODO: should we add option to mean "ignore docs that
    // don't have the group field" (instead of stuffing them
    // under null group)?
    TGroupValue groupValue = GetDocGroupValue(doc);

    if (!groupMap.TryGetValue(groupValue, out CollectedSearchGroup<TGroupValue> group))
    {
        // First time we are seeing this group, or, we've seen
        // it before but it fell out of the top N and is now
        // coming back

        if (groupMap.Count < topNGroups)
        {
            // Still in startup transient: we have not
            // seen enough unique groups to start pruning them;
            // just keep collecting them

            // Add a new CollectedSearchGroup:
            CollectedSearchGroup<TGroupValue> sg = new CollectedSearchGroup<TGroupValue>();
            sg.GroupValue = CopyDocGroupValue(groupValue, default);
            sg.ComparerSlot = groupMap.Count;
            sg.TopDoc = docBase + doc;
            foreach (FieldComparer fc in comparers)
            {
                fc.Copy(sg.ComparerSlot, doc);
            }
            groupMap[sg.GroupValue] = sg;

            if (groupMap.Count == topNGroups)
            {
                // End of startup transient: we now have max
                // number of groups; from here on we will drop
                // bottom group when we insert new one:
                BuildSortedSet();
            }
            return;
        }

        // We already tested that the document is competitive, so replace
        // the bottom group with this new group.
        //CollectedSearchGroup<TGroupValue> bottomGroup = orderedGroups.PollLast();
        CollectedSearchGroup<TGroupValue> bottomGroup;
        UninterruptableMonitor.Enter(m_orderedGroups);
        try
        {
            bottomGroup = m_orderedGroups.Last();
            m_orderedGroups.Remove(bottomGroup);
        }
        finally
        {
            UninterruptableMonitor.Exit(m_orderedGroups);
        }
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(m_orderedGroups.Count == topNGroups - 1);
        }

        groupMap.Remove(bottomGroup.GroupValue);

        // reuse the removed CollectedSearchGroup
        bottomGroup.GroupValue = CopyDocGroupValue(groupValue, bottomGroup.GroupValue);
        bottomGroup.TopDoc = docBase + doc;
        foreach (FieldComparer fc in comparers)
        {
            fc.Copy(bottomGroup.ComparerSlot, doc);
        }
        groupMap[bottomGroup.GroupValue] = bottomGroup;
        m_orderedGroups.Add(bottomGroup);
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(m_orderedGroups.Count == topNGroups);
        }

        int lastComparerSlot = m_orderedGroups.Last().ComparerSlot;
        foreach (FieldComparer fc in comparers)
        {
            fc.SetBottom(lastComparerSlot);
        }
        return;
    }

    // Update existing group:
    for (int compIDX = 0; ; compIDX++)
    {
        FieldComparer fc = comparers[compIDX];
        fc.Copy(spareSlot, doc);

        int c = reversed[compIDX] * fc.Compare(group.ComparerSlot, spareSlot);
        if (c < 0)
        {
            // Definitely not competitive.
            return;
        }
        else if (c > 0)
        {
            // Definitely competitive; set remaining comparers:
            for (int compIDX2 = compIDX + 1; compIDX2 < comparers.Length; compIDX2++)
            {
                comparers[compIDX2].Copy(spareSlot, doc);
            }
            break;
        }
        else if (compIDX == compIDXEnd)
        {
            // Here c=0. If we're at the last comparer, this doc is not
            // competitive, since docs are visited in doc Id order, which means
            // this doc cannot compete with any other document in the queue.
            return;
        }
    }

    // Remove before updating the group since lookup is done via comparers
    // TODO: optimize this
    CollectedSearchGroup<TGroupValue> prevLast;
    if (m_orderedGroups != null)
    {
        UninterruptableMonitor.Enter(m_orderedGroups);
        try
        {
            prevLast = m_orderedGroups.Last();
            m_orderedGroups.Remove(group);
        }
        finally
        {
            UninterruptableMonitor.Exit(m_orderedGroups);
        }
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(m_orderedGroups.Count == topNGroups - 1);
        }
    }
    else
    {
        prevLast = null;
    }

    group.TopDoc = docBase + doc;

    // Swap slots
    int tmp = spareSlot;
    spareSlot = group.ComparerSlot;
    group.ComparerSlot = tmp;

    // Re-add the changed group
    if (m_orderedGroups != null)
    {
        m_orderedGroups.Add(group);
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(m_orderedGroups.Count == topNGroups);
        }
        var newLast = m_orderedGroups.Last();
        // If we changed the value of the last group, or changed which group was last, then update bottom:
        if (group == newLast || prevLast != newLast)
        {
            foreach (FieldComparer fc in comparers)
            {
                fc.SetBottom(newLast.ComparerSlot);
            }
        }
    }
}