public virtual Filter GetFilter(XmlElement e)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        XmlElement childElement = DOMUtils.GetFirstChildOrFail(e);

        if (filterCache is null)
        {
            filterCache = new LurchTable<object, Filter>(LurchTableOrder.Access, cacheSize);
        }

        // Test to see if child Element is a query or filter that needs to be
        // cached
        IQueryBuilder qb = queryFactory.GetQueryBuilder(childElement.Name);
        object cacheKey = null;
        Query q = null;
        Filter f = null;
        if (qb != null)
        {
            q = qb.GetQuery(childElement);
            cacheKey = q;
        }
        else
        {
            f = filterFactory.GetFilter(childElement);
            cacheKey = f;
        }
        if (filterCache.TryGetValue(cacheKey, out Filter cachedFilter) && cachedFilter != null)
        {
            return cachedFilter; // cache hit
        }

        // cache miss
        if (qb != null)
        {
            cachedFilter = new QueryWrapperFilter(q);
        }
        else
        {
            cachedFilter = new CachingWrapperFilter(f);
        }

        filterCache[cacheKey] = cachedFilter;
        return cachedFilter;
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
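// Usage sketch (illustration only, not part of the original source): the cache
// above relies on LurchTable evicting the least-recently-accessed entry once the
// configured limit is reached. Assuming the constructor and members used in
// GetFilter above, and hypothetical Filter instances filterA..filterC:
var cache = new LurchTable<object, Filter>(LurchTableOrder.Access, 2);
cache["a"] = filterA;
cache["b"] = filterB;
cache.TryGetValue("a", out Filter _); // touch "a": "b" is now least recently accessed
cache["c"] = filterC;                 // exceeding the limit should evict "b"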
public override void Run()
{
    Random rnd = LuceneTestCase.Random;
    while (!stopped)
    {
        if (index % 2 == 0)
        {
            // refresh reader synchronized
            ReaderCouple c = outerInstance.RefreshReader(r, test, index, true);
            readersToClose.Add(c.newReader);
            readersToClose.Add(c.refreshedReader);
            readers.Add(c);
            // prevent too many readers
            break;
        }
        else
        {
            // not synchronized
            DirectoryReader refreshed = DirectoryReader.OpenIfChanged(r);
            if (refreshed == null)
            {
                refreshed = r;
            }

            IndexSearcher searcher =
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
                outerInstance.
#endif
                NewSearcher(refreshed);
            ScoreDoc[] hits = searcher.Search(new TermQuery(new Term("field1", "a" + rnd.Next(refreshed.MaxDoc))), null, 1000).ScoreDocs;
            if (hits.Length > 0)
            {
                searcher.Doc(hits[0].Doc);
            }
            if (refreshed != r)
            {
                refreshed.Dispose();
            }
        }
        UninterruptableMonitor.Enter(this);
        try
        {
            UninterruptableMonitor.Wait(this, TimeSpan.FromMilliseconds(TestUtil.NextInt32(Random, 1, 100)));
        }
        finally
        {
            UninterruptableMonitor.Exit(this);
        }
    }
}
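// Minimal sketch (illustration only; assumes a Directory named "dir") of the
// refresh idiom used in the unsynchronized branch above: OpenIfChanged returns
// null when the index is unchanged, so the old reader is kept in that case and
// disposed only when a refreshed reader replaces it.
DirectoryReader reader = DirectoryReader.Open(dir);
DirectoryReader maybeRefreshed = DirectoryReader.OpenIfChanged(reader);
if (maybeRefreshed != null)
{
    reader.Dispose();        // the old reader is stale; release it
    reader = maybeRefreshed; // continue searching against the refreshed reader
}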
/// <summary>
/// Attempts to remove the item from the <see cref="ConcurrentHashSet{T}"/>.
/// </summary>
/// <param name="item">The item to remove.</param>
/// <returns>true if an item was removed successfully; otherwise, false.</returns>
public bool TryRemove(T item)
{
    var hashcode = _comparer.GetHashCode(item);
    while (true)
    {
        var tables = _tables;

        GetBucketAndLockNo(hashcode, out int bucketNo, out int lockNo, tables.Buckets.Length, tables.Locks.Length);

        object syncRoot = tables.Locks[lockNo];
        UninterruptableMonitor.Enter(syncRoot);
        try
        {
            // If the table just got resized, we may not be holding the right lock, and must retry.
            // This should be a rare occurrence.
            if (tables != _tables)
            {
                continue;
            }

            Node previous = null;
            for (var current = tables.Buckets[bucketNo]; current != null; current = current.Next)
            {
                Debug.Assert((previous == null && current == tables.Buckets[bucketNo]) || previous.Next == current);

                if (hashcode == current.Hashcode && _comparer.Equals(current.Item, item))
                {
                    if (previous == null)
                    {
                        Volatile.Write(ref tables.Buckets[bucketNo], current.Next);
                    }
                    else
                    {
                        previous.Next = current.Next;
                    }

                    tables.CountPerLock[lockNo]--;
                    return true;
                }
                previous = current;
            }
        }
        finally
        {
            UninterruptableMonitor.Exit(syncRoot);
        }

        return false;
    }
}
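// Usage sketch (illustration only; assumes the set and an Add member analogous
// to TryRemove are accessible to calling code). TryRemove never throws on a
// missing item, so callers branch on the bool result:
var set = new ConcurrentHashSet<string>();
set.Add("segments_1");
bool removed = set.TryRemove("segments_1");      // true: the item was present
bool removedTwice = set.TryRemove("segments_1"); // false: nothing left to remove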
public virtual IDictionary<string, Document> IndexRandom(int nThreads, int iterations, int range, Directory dir, int maxThreadStates, bool doReaderPooling)
{
    IDictionary<string, Document> docs = new Dictionary<string, Document>();
    IndexWriter w = RandomIndexWriter.MockIndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random))
        .SetOpenMode(OpenMode.CREATE)
        .SetRAMBufferSizeMB(0.1)
        .SetMaxBufferedDocs(maxBufferedDocs)
        .SetIndexerThreadPool(new DocumentsWriterPerThreadPool(maxThreadStates))
        .SetReaderPooling(doReaderPooling)
        .SetMergePolicy(NewLogMergePolicy()), new YieldTestPoint(this));
    LogMergePolicy lmp = (LogMergePolicy)w.Config.MergePolicy;
    lmp.NoCFSRatio = 0.0;
    lmp.MergeFactor = mergeFactor;

    threads = new IndexingThread[nThreads];
    for (int i = 0; i < threads.Length; i++)
    {
        IndexingThread th = new IndexingThread(this);
        th.w = w;
        th.@base = 1000000 * i;
        th.range = range;
        th.iterations = iterations;
        threads[i] = th;
    }

    for (int i = 0; i < threads.Length; i++)
    {
        threads[i].Start();
    }
    for (int i = 0; i < threads.Length; i++)
    {
        threads[i].Join();
    }

    //w.ForceMerge(1);
    w.Dispose();

    for (int i = 0; i < threads.Length; i++)
    {
        IndexingThread th = threads[i];
        UninterruptableMonitor.Enter(th);
        try
        {
            docs.PutAll(th.docs);
        }
        finally
        {
            UninterruptableMonitor.Exit(th);
        }
    }

    //System.out.println("TEST: checkindex");
    TestUtil.CheckIndex(dir);

    return docs;
}
public override void ResetInputs()
{
    UninterruptableMonitor.Enter(this);
    try
    {
        base.ResetInputs();
        nextFile = 0;
        iteration = 0;
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
public override bool Store(DataOutput output)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        output.WriteVInt64(count);
        WriteRecursively(output, root);
        return true;
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
protected override void Dispose(bool disposing)
{
    if (disposing)
    {
        UninterruptableMonitor.Enter(this);
        try
        {
            // whether or not we have created a file, we need to remove
            // the lock instance from the dictionary that tracks them.
            try
            {
                UninterruptableMonitor.Enter(NativeFSLockFactory._locks);
                try
                {
                    NativeFSLockFactory._locks.Remove(path);
                }
                finally
                {
                    UninterruptableMonitor.Exit(NativeFSLockFactory._locks);
                }
            }
            finally
            {
                if (channel != null)
                {
                    try
                    {
                        IOUtils.DisposeWhileHandlingException(channel);
                    }
                    finally
                    {
                        channel = null;
                    }
                    // try to delete the file if we created it, but it's not an error if we can't.
                    try
                    {
                        File.Delete(path);
                    }
                    catch
                    {
                    }
                }
            }
        }
        finally
        {
            UninterruptableMonitor.Exit(this);
        }
    }
}
public virtual void Reset(Random random)
{
    UninterruptableMonitor.Enter(syncLock);
    try
    {
        Close();
        Open(random);
        id.Value = 0;
    }
    finally
    {
        UninterruptableMonitor.Exit(syncLock);
    }
}
public override SortedSetDocValues GetSortedSetDocValues(string field)
{
    EnsureOpen();
    OrdinalMap map = null;
    UninterruptableMonitor.Enter(cachedOrdMaps);
    try
    {
        if (!cachedOrdMaps.TryGetValue(field, out map))
        {
            // uncached, or not a multi dv
            SortedSetDocValues dv = MultiDocValues.GetSortedSetValues(@in, field);
            if (dv is MultiSortedSetDocValues docValues)
            {
                map = docValues.Mapping;
                if (map.owner == CoreCacheKey)
                {
                    cachedOrdMaps[field] = map;
                }
            }
            return dv;
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(cachedOrdMaps);
    }
    // cached ordinal map
    if (FieldInfos.FieldInfo(field).DocValuesType != DocValuesType.SORTED_SET)
    {
        return null;
    }
    if (Debugging.AssertsEnabled)
    {
        Debugging.Assert(map != null);
    }
    int size = @in.Leaves.Count;
    var values = new SortedSetDocValues[size];
    int[] starts = new int[size + 1];
    for (int i = 0; i < size; i++)
    {
        AtomicReaderContext context = @in.Leaves[i];
        SortedSetDocValues v = context.AtomicReader.GetSortedSetDocValues(field) ?? DocValues.EMPTY_SORTED_SET;
        values[i] = v;
        starts[i] = context.DocBase;
    }
    starts[size] = MaxDoc;
    return new MultiSortedSetDocValues(values, starts, map);
}
internal void Clear()
{
    UninterruptableMonitor.Enter(this);
    try
    {
        numberToName.Clear();
        nameToNumber.Clear();
        docValuesType.Clear();
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
internal void Abort(IndexWriter writer)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(!UninterruptableMonitor.IsEntered(writer), "IndexWriter lock should never be held when aborting");
        }
        bool success = false;
        JCG.HashSet<string> newFilesSet = new JCG.HashSet<string>();
        try
        {
            deleteQueue.Clear();
            if (infoStream.IsEnabled("DW"))
            {
                infoStream.Message("DW", "abort");
            }
            int limit = perThreadPool.NumThreadStatesActive;
            for (int i = 0; i < limit; i++)
            {
                ThreadState perThread = perThreadPool.GetThreadState(i);
                perThread.@Lock();
                try
                {
                    AbortThreadState(perThread, newFilesSet);
                }
                finally
                {
                    perThread.Unlock();
                }
            }
            flushControl.AbortPendingFlushes(newFilesSet);
            PutEvent(new DeleteNewFilesEvent(newFilesSet));
            flushControl.WaitForFlush();
            success = true;
        }
        finally
        {
            if (infoStream.IsEnabled("DW"))
            {
                infoStream.Message("DW", "done abort; abortedFiles=" + string.Format(J2N.Text.StringFormatter.InvariantCulture, "{0}", newFilesSet) + " success=" + success);
            }
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
// return next qnum
protected virtual int NextQnum()
{
    UninterruptableMonitor.Enter(this);
    try
    {
        int res = m_qnum;
        m_qnum = (m_qnum + 1) % m_queries.Length;
        return res;
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
private void PutPostingsFormatTypeImpl(Type postingsFormat)
{
    string name = GetServiceName(postingsFormat);
    UninterruptableMonitor.Enter(m_initializationLock);
    try
    {
        postingsFormatNameToTypeMap[name] = postingsFormat;
    }
    finally
    {
        UninterruptableMonitor.Exit(m_initializationLock);
    }
}
/// <summary>
/// Gets the <see cref="DocValuesFormat"/> instance from the provided <paramref name="name"/>.
/// </summary>
/// <param name="name">The name of the <see cref="DocValuesFormat"/> instance to retrieve.</param>
/// <returns>The <see cref="DocValuesFormat"/> instance.</returns>
public virtual DocValuesFormat GetDocValuesFormat(string name)
{
    EnsureInitialized(); // Safety in case a subclass doesn't call it
    UninterruptableMonitor.Enter(m_initializationLock);
    try
    {
        Type codecType = GetDocValuesFormatType(name);
        return GetDocValuesFormat(codecType);
    }
    finally
    {
        UninterruptableMonitor.Exit(m_initializationLock);
    }
}
private void PutDocValuesFormatTypeImpl(Type docValuesFormat)
{
    string name = GetServiceName(docValuesFormat);
    UninterruptableMonitor.Enter(m_initializationLock);
    try
    {
        docValuesFormatNameToTypeMap[name] = docValuesFormat;
    }
    finally
    {
        UninterruptableMonitor.Exit(m_initializationLock);
    }
}
private void RefreshDone()
{
    UninterruptableMonitor.Enter(this);
    try
    {
        // if we're finishing, make it out so that all waiting search threads will return
        searchingGen = finish ? long.MaxValue : refreshStartGen;
        m_notify.Set(); // LUCENENET NOTE: Will notify all and remain signaled, so it must be reset in WaitForGeneration
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
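// Illustration (not from the original source) of the signaling semantics the note
// above relies on, assuming m_notify is a ManualResetEvent-style handle: once Set,
// every waiter is released and the event stays signaled until someone calls Reset.
var notify = new System.Threading.ManualResetEvent(false);
notify.Set();      // releases all current and future waiters
notify.WaitOne();  // returns immediately: the event is still signaled
notify.Reset();    // re-arm, as WaitForGeneration is said to do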
internal virtual byte[] Get()
{
    UninterruptableMonitor.Enter(this); // TODO use BlockingCollection / BCL datastructures instead
    try
    {
        var retArray = pool[0];
        pool.RemoveAt(0);
        return retArray;
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
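// Sketch of the alternative the TODO above suggests (assumption: the pool is a
// simple hand-off of byte[] buffers). BlockingCollection<T> makes the explicit
// locking unnecessary: Take() blocks until a buffer is available, Add() returns one.
using System.Collections.Concurrent;

var bufferPool = new BlockingCollection<byte[]>();
bufferPool.Add(new byte[4096]);      // seed the pool
byte[] buffer = bufferPool.Take();   // borrow a buffer (blocks if the pool is empty)
bufferPool.Add(buffer);              // return the buffer when done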
internal virtual void AddSegment(SegmentFlushTicket ticket, FlushedSegment segment)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        // the actual flush is done asynchronously and once done the FlushedSegment
        // is passed to the flush ticket
        ticket.SetSegment(segment);
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
public override void Reset()
{
    base.Reset();
    UninterruptableMonitor.Enter(syncLock);
    try
    {
        breaker.SetText(buffer, 0, 0);
    }
    finally
    {
        UninterruptableMonitor.Exit(syncLock);
    }
    length = usableLength = offset = 0;
}
private void PutCodecTypeImpl(Type codec)
{
    string name = GetServiceName(codec);
    UninterruptableMonitor.Enter(m_initializationLock);
    try
    {
        codecNameToTypeMap[name] = codec;
    }
    finally
    {
        UninterruptableMonitor.Exit(m_initializationLock);
    }
}
internal void LockAndAbortAll(IndexWriter indexWriter)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(indexWriter.HoldsFullFlushLock);
        }
        if (infoStream.IsEnabled("DW"))
        {
            infoStream.Message("DW", "lockAndAbortAll");
        }
        bool success = false;
        try
        {
            deleteQueue.Clear();
            int limit = perThreadPool.MaxThreadStates;
            JCG.HashSet<string> newFilesSet = new JCG.HashSet<string>();
            for (int i = 0; i < limit; i++)
            {
                ThreadState perThread = perThreadPool.GetThreadState(i);
                // intentionally left locked; the states are released later,
                // or by UnlockAllAfterAbortAll below on failure
                perThread.@Lock();
                AbortThreadState(perThread, newFilesSet);
            }
            deleteQueue.Clear();
            flushControl.AbortPendingFlushes(newFilesSet);
            PutEvent(new DeleteNewFilesEvent(newFilesSet));
            flushControl.WaitForFlush();
            success = true;
        }
        finally
        {
            if (infoStream.IsEnabled("DW"))
            {
                infoStream.Message("DW", "finished lockAndAbortAll success=" + success);
            }
            if (!success)
            {
                // if something happens here we unlock all states again
                UnlockAllAfterAbortAll(indexWriter);
            }
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
/// <summary>
/// For definition of "check point" see <see cref="IndexWriter"/> comments:
/// "Clarification: Check Points (and commits)".
/// <para/>
/// Writer calls this when it has made a "consistent
/// change" to the index, meaning new files are written to
/// the index and the in-memory <see cref="SegmentInfos"/> have been
/// modified to point to those files.
/// <para/>
/// This may or may not be a commit (segments_N may or may
/// not have been written).
/// <para/>
/// We simply incref the files referenced by the new
/// <see cref="SegmentInfos"/> and decref the files we had previously
/// seen (if any).
/// <para/>
/// If this is a commit, we also call the policy to give it
/// a chance to remove other commits. If any commits are
/// removed, we decref their files as well.
/// </summary>
public void Checkpoint(SegmentInfos segmentInfos, bool isCommit)
{
    if (Debugging.AssertsEnabled)
    {
        Debugging.Assert(IsLocked);
        Debugging.Assert(UninterruptableMonitor.IsEntered(writer));
    }
    long t0 = 0;
    if (infoStream.IsEnabled("IFD"))
    {
        t0 = J2N.Time.NanoTime();
        infoStream.Message("IFD", "now checkpoint \"" + writer.SegString(writer.ToLiveInfos(segmentInfos).Segments) + "\" [" + segmentInfos.Count + " segments " + "; isCommit = " + isCommit + "]");
    }

    // Try again now to delete any previously un-deletable
    // files (because they were in use, on Windows):
    DeletePendingFiles();

    // Incref the files:
    IncRef(segmentInfos, isCommit);

    if (isCommit)
    {
        // Append to our commits list:
        commits.Add(new CommitPoint(commitsToDelete, directory, segmentInfos));

        // Tell policy so it can remove commits:
        policy.OnCommit(commits);

        // Decref files for commits that were deleted by the policy:
        DeleteCommits();
    }
    else
    {
        // DecRef old files from the last checkpoint, if any:
        DecRef(lastFiles);
        lastFiles.Clear();

        // Save files so we can decr on next checkpoint/commit:
        lastFiles.AddRange(segmentInfos.GetFiles(directory, false));
    }

    if (infoStream.IsEnabled("IFD"))
    {
        long t1 = J2N.Time.NanoTime();
        infoStream.Message("IFD", ((t1 - t0) / 1000000) + " msec to checkpoint");
    }
}
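// Conceptual sketch (not IndexFileDeleter's actual implementation) of the
// incref/decref bookkeeping described above: every file referenced by the new
// checkpoint gains a reference, files from the previous checkpoint lose one,
// and a file is deleted once no checkpoint or commit references it.
internal sealed class RefCountingDeleter // hypothetical illustration type
{
    private readonly System.Collections.Generic.Dictionary<string, int> refCounts =
        new System.Collections.Generic.Dictionary<string, int>();

    public void IncRef(string file)
    {
        refCounts.TryGetValue(file, out int count);
        refCounts[file] = count + 1;
    }

    public void DecRef(string file)
    {
        if (--refCounts[file] == 0)
        {
            refCounts.Remove(file);
            System.IO.File.Delete(file); // nothing references it anymore
        }
    }
}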
private void UnCache(string fileName)
{
    // Only let one thread uncache at a time; this only
    // happens during commit() or close():
    UninterruptableMonitor.Enter(uncacheLock);
    try
    {
        if (VERBOSE)
        {
            Console.WriteLine("nrtdir.unCache name=" + fileName);
        }
#pragma warning disable 612, 618
        if (!cache.FileExists(fileName))
#pragma warning restore 612, 618
        {
            // Another thread beat us...
            return;
        }
        IOContext context = IOContext.DEFAULT;
        IndexOutput @out = @delegate.CreateOutput(fileName, context);
        IndexInput @in = null;
        try
        {
            @in = cache.OpenInput(fileName, context);
            @out.CopyBytes(@in, @in.Length);
        }
        finally
        {
            IOUtils.Dispose(@in, @out);
        }

        // Lock order: uncacheLock -> this
        UninterruptableMonitor.Enter(this);
        try
        {
            // Must sync here because other sync methods have
            // if (cache.fileExists(name)) { ... } else { ... }:
            cache.DeleteFile(fileName);
        }
        finally
        {
            UninterruptableMonitor.Exit(this);
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(uncacheLock);
    }
}
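// Illustration (not from the original source) of why the "Lock order:
// uncacheLock -> this" comment matters: if every thread acquires the two
// monitors in the same order, the classic two-lock deadlock cannot occur.
// Assumed names: lockA plays the role of uncacheLock, lockB the role of "this".
void SafePath(object lockA, object lockB)
{
    UninterruptableMonitor.Enter(lockA);        // always the outer lock first...
    try
    {
        UninterruptableMonitor.Enter(lockB);    // ...then the inner lock
        try
        {
            // critical section touching state guarded by both locks
        }
        finally
        {
            UninterruptableMonitor.Exit(lockB);
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(lockA);
    }
}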
internal string[] Next()
{
    if (t == null)
    {
        threadDone = false;
        t = new ThreadJob(Run);
        t.IsBackground = true;
        t.Start();
    }
    string[] result;
    UninterruptableMonitor.Enter(this);
    try
    {
        while (tuple == null && nmde == null && !threadDone && !stopped)
        {
            try
            {
                UninterruptableMonitor.Wait(this);
            }
            catch (Exception ie) when (ie.IsInterruptedException())
            {
                throw new Util.ThreadInterruptedException(ie);
            }
        }
        if (tuple != null)
        {
            result = tuple;
            tuple = null;
            UninterruptableMonitor.Pulse(this); // notify();
            return result;
        }
        if (nmde != null)
        {
            // Set to null so we will re-start thread in case
            // we are re-used:
            t = null;
            throw nmde;
        }
        // The thread has exited yet did not hit end of
        // data, so this means it hit an exception. We
        // throw NoMoreDataException here to force
        // benchmark to stop the current alg:
        throw new NoMoreDataException();
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
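// Minimal sketch (illustration only) of the wait/pulse hand-off Next() uses:
// the consumer waits on the monitor until the producer publishes an item and
// pulses; the consumer clears the slot and pulses back so the producer can
// publish the next item.
internal sealed class HandOff // hypothetical illustration type
{
    private readonly object gate = new object();
    private string[] slot;

    public void Publish(string[] item) // producer side
    {
        UninterruptableMonitor.Enter(gate);
        try
        {
            slot = item;
            UninterruptableMonitor.Pulse(gate); // wake the waiting consumer
        }
        finally
        {
            UninterruptableMonitor.Exit(gate);
        }
    }

    public string[] Consume() // consumer side
    {
        UninterruptableMonitor.Enter(gate);
        try
        {
            while (slot == null)
            {
                UninterruptableMonitor.Wait(gate); // releases the lock while waiting
            }
            string[] item = slot;
            slot = null;
            return item;
        }
        finally
        {
            UninterruptableMonitor.Exit(gate);
        }
    }
}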
/// <summary>
/// Mark the end of a task.
/// </summary>
public virtual void MarkTaskEnd(TaskStats stats, int count)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        int numParallelTasks = nextTaskRunNum - 1 - stats.TaskRunNum;
        // note: if the stats were cleared, this stats object may no longer
        // be in points, but that is ok.
        stats.MarkEnd(numParallelTasks, count);
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
public virtual void Clear()
{
    UninterruptableMonitor.Enter(this);
    try
    {
        updates.Clear();
        nextGen = 1;
        numTerms.Value = 0;
        bytesUsed.Value = 0;
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
protected override void SetNextSentence(int sentenceStart, int sentenceEnd)
{
    UninterruptableMonitor.Enter(syncLock);
    try
    {
        this.sentenceStart = sentenceStart;
        this.sentenceEnd = sentenceEnd;
        wrapper.SetText(m_buffer, sentenceStart, sentenceEnd - sentenceStart);
        wordBreaker.SetText(new string(wrapper.Text, wrapper.Start, wrapper.Length));
    }
    finally
    {
        UninterruptableMonitor.Exit(syncLock);
    }
}
/// <summary>
/// Mark that a task is starting.
/// Create a task stats for it and store it as a point.
/// </summary>
/// <param name="task">The starting task.</param>
/// <param name="round">The current round.</param>
/// <returns>The new task stats created for the starting task.</returns>
public virtual TaskStats MarkTaskStart(PerfTask task, int round)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        TaskStats stats = new TaskStats(task, NextTaskRunNum(), round);
        this.currentStats = stats;
        points.Add(stats);
        return stats;
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
internal bool UpdateBinaryDocValue(Term term, string field, BytesRef value)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        DocumentsWriterDeleteQueue deleteQueue = this.deleteQueue;
        deleteQueue.AddBinaryUpdate(new BinaryDocValuesUpdate(term, field, value));
        flushControl.DoOnDelete();
        return ApplyAllDeletes(deleteQueue);
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}