public void SingleAttemptMultiThreadException(int numThreads, int numAttempts)
{
    RunWithScheduler(numThreads, scheduler =>
    {
        long counter = 0;
        var disposer = new DisposeOnce<SingleAttempt>(() =>
        {
            Assert.Equal(counter, 0);
            Interlocked.Increment(ref counter);
            throw new InvalidOperationException("Don't touch me!");
        });

        long pendingAttempts = numAttempts;
        using (var resetEvent = new ManualResetEvent(false))
        {
            for (int i = 0; i < numAttempts; i++)
            {
                var task = new Task(() =>
                {
                    Assert.Throws(typeof(AggregateException), () => disposer.Dispose());
                    Assert.Equal(counter, 1);
                    if (Interlocked.Decrement(ref pendingAttempts) == 0)
                    {
                        // ReSharper disable once AccessToDisposedClosure
                        resetEvent.Set();
                    }
                });
                task.Start(scheduler);
            }

            resetEvent.WaitOne();

            // After this point, all threads have run.
            Assert.True(disposer.Disposed);
            Assert.Equal(counter, 1);
        }
    });
}
public void WithRetryMultiThreadNoErrors(int numThreads, int numAttempts)
{
    RunWithScheduler(numThreads, scheduler =>
    {
        long counter = 0;
        var disposer = new DisposeOnce<SingleAttempt>(() =>
        {
            Assert.Equal(counter, 0);
            Interlocked.Increment(ref counter);
            Assert.Equal(counter, 1);
        });

        long pendingAttempts = numAttempts;
        using (var resetEvent = new ManualResetEvent(false))
        {
            for (int i = 0; i < numAttempts; i++)
            {
                var task = new Task(() =>
                {
                    disposer.Dispose();
                    if (Interlocked.Decrement(ref pendingAttempts) == 0)
                    {
                        // ReSharper disable once AccessToDisposedClosure
                        resetEvent.Set();
                    }
                });
                task.Start(scheduler);
            }

            resetEvent.WaitOne();

            // After this point, all threads have run.
            Assert.True(disposer.Disposed);
            Assert.Equal(counter, 1);
        }
    });
}
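The two tests above pin down the SingleAttempt contract: the dispose action runs exactly once no matter how many threads race on Dispose(), and if it throws, every caller observes that one failure wrapped in an AggregateException. A minimal sketch of those semantics, built on Lazy<T> for illustration only (this is not Sparrow's actual DisposeOnce implementation):

using System;
using System.Threading;

// Illustrative sketch of SingleAttempt semantics; not Sparrow's DisposeOnce.
public sealed class SingleAttemptSketch
{
    private readonly Lazy<Exception> _attempt;

    public SingleAttemptSketch(Action action)
    {
        // ExecutionAndPublication guarantees the action body runs exactly once,
        // even when many threads race on Dispose(); the outcome is cached.
        _attempt = new Lazy<Exception>(() =>
        {
            try { action(); return null; }
            catch (Exception e) { return e; }
        }, LazyThreadSafetyMode.ExecutionAndPublication);
    }

    public bool Disposed => _attempt.IsValueCreated;

    public void Dispose()
    {
        var failure = _attempt.Value; // concurrent callers block on the single attempt
        if (failure != null)
            throw new AggregateException(failure); // same cached failure on every call
    }
}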
protected AbstractPager(StorageEnvironmentOptions options, bool usePageProtection = false)
{
    DisposeOnceRunner = new DisposeOnce<SingleAttempt>(() =>
    {
        if (FileName?.FullPath != null)
            _options?.IoMetrics?.FileClosed(FileName.FullPath);

        if (_pagerState != null)
        {
            _pagerState.Release();
            _pagerState = null;
        }

        if (FileName?.FullPath != null)
            NativeMemory.UnregisterFileMapping(FileName.FullPath);

        DisposeInternal();
    });

    _options = options;
    UsePageProtection = usePageProtection;

    Debug.Assert((Constants.Storage.PageSize - Constants.Tree.PageHeaderSize) / Constants.Tree.MinKeysInPage >= 1024);

    NodeMaxSize = PageMaxSpace / 2 - 1;

    // MaxNodeSize is usually persisted as an unsigned short. Therefore, we must
    // ensure it is not possible to have an overflow.
    Debug.Assert(NodeMaxSize < ushort.MaxValue);

    _increaseSize = MinIncreaseSize;
    PageMinSpace = (int)(PageMaxSpace * 0.33);

    SetPagerState(new PagerState(this));
}
public ScratchBufferFile(AbstractPager scratchPager, int scratchNumber)
{
    _scratchPager = scratchPager;
    _scratchNumber = scratchNumber;
    _allocatedPagesCount = 0;

    scratchPager.AllocatedInBytesFunc = () => AllocatedPagesCount * Constants.Storage.PageSize;
    _strongRefToAllocateInBytesFunc = new StrongReference<Func<long>> { Value = scratchPager.AllocatedInBytesFunc };
    MemoryInformation.DirtyMemoryObjects.TryAdd(_strongRefToAllocateInBytesFunc);

    _disposeOnceRunner = new DisposeOnce<SingleAttempt>(() =>
    {
        // Drop the ref (clears any leftover refs in DirtyMemoryObjects); this
        // also functions as a _disposed = true flag for a racy func invoke.
        _strongRefToAllocateInBytesFunc.Value = null;
        MemoryInformation.DirtyMemoryObjects.TryRemove(_strongRefToAllocateInBytesFunc);
        _strongRefToAllocateInBytesFunc = null;

        _scratchPager.PagerState.DiscardOnTxCopy = true;
        _scratchPager.Dispose();
        ClearDictionaries();
    });
}
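Nulling the StrongReference's Value in the dispose callback does double duty: it drops the delegate so the dirty-memory tracker can no longer invoke it, and any racy reader that grabbed the reference sees null and skips the call. A tiny sketch of that pattern with hypothetical names (StrongCell here stands in for Sparrow's StrongReference):

using System;

// Illustrative sketch: a strong cell whose Value doubles as a disposed flag.
public sealed class StrongCell<T> where T : class
{
    public volatile T Value;
}

public static class DirtyMemoryTrackerSketch
{
    // A racy consumer copies the field once and null-checks it, so a concurrent
    // dispose (Value = null) makes it skip the call instead of crashing.
    public static long ReadAllocated(StrongCell<Func<long>> cell)
    {
        var func = cell?.Value;
        return func?.Invoke() ?? 0;
    }
}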
public unsafe ReadWriteCompressedStream(Stream inner, JsonOperationContext.MemoryBuffer alreadyOnBuffer)
{
    Stream innerInput = inner;
    int valid = alreadyOnBuffer.Valid - alreadyOnBuffer.Used;
    if (valid > 0)
    {
        // Bytes already read from the inner stream sit in the memory buffer;
        // copy them into a rented array and prepend them to the input.
        byte[] buffer = ArrayPool<byte>.Shared.Rent(valid);
        fixed (byte* pBuffer = buffer)
        {
            Memory.Copy(pBuffer, alreadyOnBuffer.Address + alreadyOnBuffer.Used, valid);
        }

        innerInput = new ConcatStream(new ConcatStream.RentedBuffer
        {
            Buffer = buffer,
            Offset = 0,
            Count = valid
        }, inner);

        alreadyOnBuffer.Valid = alreadyOnBuffer.Used = 0; // consume all the data from the buffer
    }

    _inner = innerInput ?? throw new ArgumentNullException(nameof(inner));
    _input = ZstdStream.Decompress(innerInput); // decompress the concatenated input so the leftover bytes are not lost
    _output = ZstdStream.Compress(inner);
    _dispose = new DisposeOnce<SingleAttempt>(DisposeInternal);
}
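The ConcatStream above stitches the already-buffered bytes in front of the inner stream so the decompressor sees one contiguous input. A rough BCL-only sketch of that read behavior (illustrative; the real ConcatStream also returns the rented buffer to the pool):

using System;
using System.IO;

// Illustrative stand-in for ConcatStream: drain a prefix, then fall through
// to the tail stream. Read-only, non-seekable.
public sealed class PrefixedReadStream : Stream
{
    private readonly Stream _head, _tail;

    public PrefixedReadStream(byte[] prefix, int offset, int count, Stream tail)
    {
        _head = new MemoryStream(prefix, offset, count, writable: false);
        _tail = tail;
    }

    public override int Read(byte[] buffer, int offset, int count)
    {
        int n = _head.Read(buffer, offset, count); // serve the prefix first
        return n > 0 ? n : _tail.Read(buffer, offset, count);
    }

    public override bool CanRead => true;
    public override bool CanSeek => false;
    public override bool CanWrite => false;
    public override long Length => throw new NotSupportedException();
    public override long Position
    {
        get => throw new NotSupportedException();
        set => throw new NotSupportedException();
    }
    public override void Flush() { }
    public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
    public override void SetLength(long value) => throw new NotSupportedException();
    public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException();
}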
public void WithRetryMultiThreadException(int numThreads, int numBatches, int batchSize, int waitTimeMillis)
{
    // The idea of this test is that the disposer holds the lock until all other
    // threads are waiting on it, and then throws an exception. Since all other
    // threads in the batch should be waiting for it, the counter should be
    // incremented exactly once per batch.
    //
    // There is no way to guarantee this (see the ABA problem): if the disposer
    // finishes fast enough, other threads won't see the exception being thrown,
    // and thus it may be executed more than once per batch.
    //
    // The approach is to make the disposer sleep for long enough that the other
    // threads have been enqueued by the time it wakes up.
    RunWithScheduler(numThreads + 1, scheduler =>
    {
        int counter = 0;
        var disposer = new DisposeOnce<ExceptionRetry>(() =>
        {
            Interlocked.Increment(ref counter);

            // Wait for all work items to either enqueue or run. By the time we
            // wake up again, the disposer should be waiting, and all other
            // threads should be stopped waiting for it to finish.
            Thread.Sleep(waitTimeMillis);
            throw new InvalidOperationException("Don't touch me!");
        });

        for (int batch = 0; batch < numBatches; batch++)
        {
            Assert.Equal(counter, batch);

            using (var resetEvent = new ManualResetEvent(false))
            //using (var barrier = new Barrier(batchSize))
            {
                int workItemsPending = batchSize;
                for (int threadNum = 0; threadNum < batchSize; threadNum++)
                {
                    var task = new Task(() =>
                    {
                        // Make sure all threads reach this piece. It is not
                        // guaranteed by the thread pool WHEN this code will run.
                        //barrier.SignalAndWait();
                        Assert.Throws(typeof(AggregateException), () => disposer.Dispose());
                        if (Interlocked.Decrement(ref workItemsPending) == 0)
                        {
                            // ReSharper disable once AccessToDisposedClosure
                            resetEvent.Set();
                        }
                    });
                    task.Start(scheduler);
                }

                // Wait until all work items are finished
                resetEvent.WaitOne();

                // The batch has finished running; verify the counter hasn't
                // changed more than necessary.
                Assert.Equal(counter, batch + 1);
            }
        }
    });
}
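This test exercises the ExceptionRetry policy: concurrent callers all join the in-flight attempt and observe its failure (hence the AggregateException), but once that attempt fails, the next Dispose() runs the action again. A minimal sketch of such semantics, assuming a shared Task per attempt whose slot is cleared on failure (illustrative only, not Sparrow's actual implementation):

using System;
using System.Threading;
using System.Threading.Tasks;

// Illustrative sketch of ExceptionRetry semantics; not Sparrow's DisposeOnce.
public sealed class ExceptionRetrySketch
{
    private readonly Action _action;
    private Task _current;

    public ExceptionRetrySketch(Action action) => _action = action;

    public bool Disposed => _current?.Status == TaskStatus.RanToCompletion;

    public void Dispose()
    {
        var attempt = _current;
        if (attempt == null)
        {
            var candidate = new Task(_action);
            // Only one thread publishes the attempt; the rest join it.
            attempt = Interlocked.CompareExchange(ref _current, candidate, null) ?? candidate;
            if (attempt == candidate)
                attempt.RunSynchronously();
        }

        try
        {
            // Every concurrent caller waits on the same attempt; Wait() wraps
            // any failure in an AggregateException, as the test expects.
            attempt.Wait();
        }
        catch
        {
            // Clear the slot so the *next* Dispose() call retries the action.
            Interlocked.CompareExchange(ref _current, null, attempt);
            throw;
        }
    }
}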
public LuceneIndexPersistence(Index index)
{
    _index = index;
    _logger = LoggingSource.Instance.GetLogger<LuceneIndexPersistence>(index.DocumentDatabase.Name);
    _suggestionsDirectories = new Dictionary<string, LuceneVoronDirectory>();
    _suggestionsIndexSearcherHolders = new Dictionary<string, IndexSearcherHolder>();

    _disposeOnce = new DisposeOnce<SingleAttempt>(() =>
    {
        DisposeWriters();

        _lastReader?.Dispose();
        _indexSearcherHolder?.Dispose();
        _converter?.Dispose();
        _directory?.Dispose();

        foreach (var directory in _suggestionsDirectories)
        {
            directory.Value?.Dispose();
        }
    });

    var fields = index.Definition.IndexFields.Values;

    switch (_index.Type)
    {
        case IndexType.AutoMap:
            _converter = new LuceneDocumentConverter(fields);
            break;
        case IndexType.AutoMapReduce:
            _converter = new LuceneDocumentConverter(fields, reduceOutput: true);
            break;
        case IndexType.MapReduce:
            _converter = new AnonymousLuceneDocumentConverter(fields, _index.IsMultiMap, reduceOutput: true);
            break;
        case IndexType.Map:
            _converter = new AnonymousLuceneDocumentConverter(fields, _index.IsMultiMap);
            break;
        case IndexType.JavaScriptMap:
            _converter = new JintLuceneDocumentConverter(fields);
            break;
        case IndexType.JavaScriptMapReduce:
            _converter = new JintLuceneDocumentConverter(fields, reduceOutput: true);
            break;
        case IndexType.Faulty:
            _converter = null;
            break;
        default:
            throw new NotSupportedException(_index.Type.ToString());
    }

    _fields = fields.ToDictionary(x => x.Name, x => x);

    _indexSearcherHolder = new IndexSearcherHolder(CreateIndexSearcher, _index._indexStorage.DocumentDatabase);

    foreach (var field in _fields)
    {
        if (!field.Value.HasSuggestions)
            continue;

        string fieldName = field.Key;
        _suggestionsIndexSearcherHolders[fieldName] =
            new IndexSearcherHolder(state => new IndexSearcher(_suggestionsDirectories[fieldName], true, state), _index._indexStorage.DocumentDatabase);
    }

    IndexSearcher CreateIndexSearcher(IState state)
    {
        lock (this)
        {
            var reader = _lastReader;

            if (reader != null)
            {
                if (reader.RefCount <= 0)
                {
                    reader = null;
                }
                else
                {
                    try
                    {
                        var newReader = reader.Reopen(state);
                        if (newReader != reader)
                            reader.DecRef(state);

                        reader = _lastReader = newReader;
                    }
                    catch (Exception e)
                    {
                        if (_logger.IsInfoEnabled)
                            _logger.Info($"Could not reopen the index reader for index '{_index.Name}'.", e);

                        // Fallback strategy in case the reader was closed
                        // before Reopen and DecRef executed.
                        reader = null;
                    }
                }
            }

            if (reader == null)
                reader = _lastReader = IndexReader.Open(_directory, readOnly: true, state);

            reader.IncRef();

            return new IndexSearcher(reader);
        }
    }
}
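The CreateIndexSearcher local function above leans on Lucene.NET's reference-counted readers: Reopen may hand back a new reader, in which case the old one is released via DecRef, and every searcher takes its own IncRef so the reader stays alive while in use. A stripped-down sketch of that refcount discipline (hypothetical type, not the Lucene.NET API itself):

using System;
using System.Threading;

// Hypothetical refcounted wrapper illustrating the IncRef/DecRef discipline
// used with Lucene readers above; not the Lucene.NET IndexReader API.
public sealed class RefCounted<T> where T : class, IDisposable
{
    private T _value;
    private int _refs = 1; // the creator holds the first reference

    public RefCounted(T value) => _value = value;

    public int RefCount => Volatile.Read(ref _refs);

    public void IncRef() => Interlocked.Increment(ref _refs);

    public void DecRef()
    {
        // The last reference out disposes the underlying resource.
        if (Interlocked.Decrement(ref _refs) == 0)
        {
            _value.Dispose();
            _value = null;
        }
    }
}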
public LuceneIndexPersistence(Index index)
{
    _index = index;
    _suggestionsDirectories = new Dictionary<string, LuceneVoronDirectory>();
    _suggestionsIndexSearcherHolders = new Dictionary<string, IndexSearcherHolder>();

    _disposeOnce = new DisposeOnce<SingleAttempt>(() =>
    {
        DisposeWriters();

        _indexSearcherHolder?.Dispose();
        _converter?.Dispose();
        _directory?.Dispose();

        foreach (var directory in _suggestionsDirectories)
        {
            directory.Value?.Dispose();
        }
    });

    var fields = index.Definition.IndexFields.Values;

    switch (_index.Type)
    {
        case IndexType.AutoMap:
            _converter = new LuceneDocumentConverter(fields);
            break;
        case IndexType.AutoMapReduce:
            _converter = new LuceneDocumentConverter(fields, reduceOutput: true);
            break;
        case IndexType.MapReduce:
            _converter = new AnonymousLuceneDocumentConverter(fields, _index.IsMultiMap, reduceOutput: true);
            break;
        case IndexType.Map:
            _converter = new AnonymousLuceneDocumentConverter(fields, _index.IsMultiMap);
            break;
        case IndexType.Faulty:
            _converter = null;
            break;
        default:
            throw new NotSupportedException(_index.Type.ToString());
    }

    _fields = fields.ToDictionary(x => x.Name, x => x);

    _indexSearcherHolder = new IndexSearcherHolder(state => new IndexSearcher(_directory, true, state), _index._indexStorage.DocumentDatabase);

    foreach (var field in _fields)
    {
        if (!field.Value.HasSuggestions)
            continue;

        string fieldName = field.Key;
        _suggestionsIndexSearcherHolders[fieldName] =
            new IndexSearcherHolder(state => new IndexSearcher(_suggestionsDirectories[fieldName], true, state), _index._indexStorage.DocumentDatabase);
    }
}
public DocumentDatabase(string name, RavenConfiguration configuration, ServerStore serverStore, Action<string> addToInitLog)
{
    Name = name;
    _logger = LoggingSource.Instance.GetLogger<DocumentDatabase>(Name);
    _serverStore = serverStore;
    _addToInitLog = addToInitLog;
    StartTime = Time.GetUtcNow();
    LastAccessTime = Time.GetUtcNow();
    Configuration = configuration;
    Scripts = new ScriptRunnerCache(this, Configuration);

    _disposeOnce = new DisposeOnce<SingleAttempt>(DisposeInternal);

    try
    {
        TryAcquireWriteLock();

        using (_serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext ctx))
        using (ctx.OpenReadTransaction())
        {
            MasterKey = serverStore.GetSecretKey(ctx, Name);

            var databaseRecord = _serverStore.Cluster.ReadDatabase(ctx, Name);
            if (databaseRecord != null)
            {
                // Can happen when we are in the process of restoring a database.
                if (databaseRecord.Encrypted && MasterKey == null)
                    throw new InvalidOperationException($"Attempt to create encrypted db {Name} without supplying the secret key");
                if (databaseRecord.Encrypted == false && MasterKey != null)
                    throw new InvalidOperationException($"Attempt to create a non-encrypted db {Name}, but a secret key exists for this db.");
            }
        }

        QueryMetadataCache = new QueryMetadataCache();
        IoChanges = new IoChangesNotifications();
        Changes = new DocumentsChanges();
        TombstoneCleaner = new TombstoneCleaner(this);
        DocumentsStorage = new DocumentsStorage(this, addToInitLog);
        IndexStore = new IndexStore(this, serverStore);
        QueryRunner = new QueryRunner(this);
        EtlLoader = new EtlLoader(this, serverStore);
        ReplicationLoader = new ReplicationLoader(this, serverStore);
        SubscriptionStorage = new SubscriptionStorage(this, serverStore);
        Metrics = new MetricCounters();
        TxMerger = new TransactionOperationsMerger(this, DatabaseShutdown);
        HugeDocuments = new HugeDocuments(configuration.PerformanceHints.HugeDocumentsCollectionSize,
            configuration.PerformanceHints.HugeDocumentSize.GetValue(SizeUnit.Bytes));
        ConfigurationStorage = new ConfigurationStorage(this);
        NotificationCenter = new NotificationCenter.NotificationCenter(ConfigurationStorage.NotificationsStorage, Name, _databaseShutdown.Token);
        Operations = new Operations.Operations(Name, ConfigurationStorage.OperationsStorage, NotificationCenter, Changes);
        DatabaseInfoCache = serverStore.DatabaseInfoCache;
        RachisLogIndexNotifications = new RachisLogIndexNotifications(DatabaseShutdown);
        CatastrophicFailureNotification = new CatastrophicFailureNotification((environmentId, e) =>
        {
            serverStore.DatabasesLandlord.CatastrophicFailureHandler.Execute(name, e, environmentId);
        });
    }
    catch (Exception)
    {
        Dispose();
        throw;
    }
}
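The try/catch tail above is the usual guard for constructors that allocate many disposables: if any step throws, everything already created is torn down through the same Dispose() path before the exception propagates, and DisposeOnce makes that early Dispose() call safe to repeat later. A generic sketch of the pattern (illustrative names, not the DocumentDatabase code):

using System;

// Dispose-on-construction-failure pattern, sketched with two resources.
public sealed class OwnsResources : IDisposable
{
    private readonly IDisposable _first;
    private IDisposable _second;

    public OwnsResources(Func<IDisposable> makeFirst, Func<IDisposable> makeSecond)
    {
        _first = makeFirst();
        try
        {
            _second = makeSecond(); // may throw after _first is already live
        }
        catch
        {
            Dispose(); // release _first instead of leaking it
            throw;     // surface the original failure to the caller
        }
    }

    public void Dispose()
    {
        _second?.Dispose();
        _first?.Dispose();
    }
}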
private ZstdStream(Stream inner, bool compression)
{
    _inner = inner ?? throw new ArgumentNullException(nameof(inner));
    _compression = compression;
    _disposeOnce = new DisposeOnce<SingleAttempt>(DisposeInternal);
}
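Passing a private DisposeInternal method to the runner, as ZstdStream does here, is the recurring consumption pattern in these examples: the owning type forwards its public Dispose to the runner and gets idempotent, thread-safe teardown for free. A minimal sketch of that shape (illustrative class; assumes Sparrow's DisposeOnce and SingleAttempt types are in scope):

using System;

// Sketch of the typical DisposeOnce consumption pattern.
public sealed class Owner : IDisposable
{
    private readonly DisposeOnce<SingleAttempt> _disposeOnce;

    public Owner()
    {
        _disposeOnce = new DisposeOnce<SingleAttempt>(DisposeInternal);
    }

    private void DisposeInternal()
    {
        // Release streams, native handles, etc. -- runs at most once.
    }

    public void Dispose() => _disposeOnce.Dispose(); // idempotent by construction
}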