// Attempts to zstd-compress RawBuffer into a newly allocated CompressedBuffer.
// Returns true and sets Compressed = true when the compressed form (plus the
// reverse-encoded dictionary id) is smaller than the raw data; returns false
// (Compressed = false) when compression did not pay off. On any exception both
// the compressed and the raw scopes are disposed and the exception is rethrown.
// NOTE(review): OverheadSize presumably reserves room for the dictionary-id
// suffix written below — confirm against its declaration.
public bool TryCompression(Table table, TableSchema schema)
{
    try
    {
        var tx = table._tx;
        // Worst-case output size for this input, so a single allocation always suffices.
        int maxSpace = ZstdLib.GetMaxCompression(RawBuffer.Length);
        _compressedScope = tx.Allocator.Allocate(maxSpace + OverheadSize, out CompressedBuffer);
        Compressed = false;
        var compressionDictionary = tx.LowLevelTransaction.Environment.CompressionDictionariesHolder
            .GetCompressionDictionaryFor(tx, table.CurrentCompressionDictionaryId);
        // Mark the attempt before compressing, so callers can tell "tried and failed"
        // apart from "never tried" even if we bail out below.
        CompressionTried = true;
        var size = ZstdLib.Compress(RawBuffer.ToReadOnlySpan(), CompressedBuffer.ToSpan(), compressionDictionary);
        // Append the dictionary id (reverse variable-size int) after the payload so
        // the decompressor can locate the dictionary that was used.
        size += WriteVariableSizeIntInReverse(CompressedBuffer.Ptr + size, compressionDictionary.Id);
        CompressedBuffer.Truncate(size);
        var compressionRatio = GetCompressionRatio(size, RawBuffer.Length);
        if (compressionRatio > compressionDictionary.ExpectedCompressionRatio + 10)
        {
            // training dictionaries is expensive, only do that if we see that the current compressed
            // value is significantly worse than the previous one
            var etagTree = table.GetFixedSizeTree(schema.CompressedEtagSourceIndex);
            if (ShouldRetrain(etagTree))
            {
                MaybeTrainCompressionDictionary(table, etagTree);
            }
        }
        if (CompressedBuffer.Length >= RawBuffer.Length)
        {
            // we compressed too large, so we skip compression here
            _compressedScope.Dispose();
            // Explicitly not disposing this, we need to have the raw buffer
            // when we do update then insert and the size is too large
            // RawScope.Dispose();
            Compressed = false;
            return (false);
        }
        Compressed = true;
        return (true);
    }
    catch
    {
        // On failure neither buffer is usable by the caller — release both and rethrow.
        _compressedScope.Dispose();
        RawScope.Dispose();
        throw;
    }
}
// Tears down the storage environment, options and allocator, removes the data
// directory, and forces a full GC so finalizable native resources are released
// before the directory path can be reused (typical test-teardown pattern).
//
// Fix: the original called _options.Dispose() / _allocator.Dispose() without a
// null check while also nulling those fields at the end — a second Dispose()
// call therefore threw NullReferenceException. _storageEnvironment already used
// the null-conditional form; the other two now match, making Dispose idempotent.
public virtual void Dispose()
{
    _storageEnvironment?.Dispose();
    _options?.Dispose();
    _allocator?.Dispose();
    // Delete only after everything holding files in DataDir has been disposed.
    DeleteDirectory(DataDir);
    _storageEnvironment = null;
    _options = null;
    _allocator = null;
    // Full collect + finalizer drain so no finalizer still references the
    // deleted directory or native handles when the next test starts.
    GC.Collect(GC.MaxGeneration);
    GC.WaitForPendingFinalizers();
}
// Releases the owned storage environment (when this instance owns it), the
// byte-string context and both table schemas. Safe to call more than once:
// after the first call only the finalizer suppression is repeated.
public void Dispose()
{
    if (_isDisposed)
    {
        // Already torn down — still keep the finalizer suppressed.
        GC.SuppressFinalize(this);
        return;
    }

    // Only dispose the environment if we created it ourselves.
    if (_ownsStorageEnvironment)
    {
        _env.Dispose();
    }

    _isDisposed = true;

    _byteStringContext.Dispose();
    _edgesSchema.Dispose();
    _verticesSchema.Dispose();

    GC.SuppressFinalize(this);
}
// Disposes a low-level transaction. Rolls back an uncommitted read-write
// transaction, then — even if rollback throws — releases every resource the
// transaction pinned (pager states, journal files, trees, allocator) and
// notifies the environment and OnDispose subscribers via the finally block.
// Idempotent: the TxState.Disposed flag short-circuits repeat calls.
public void Dispose()
{
    if (_disposed.HasFlag(TxState.Disposed))
    {
        return;
    }
    try
    {
        // An active read-write tx that was neither committed nor rolled back
        // must be rolled back before its resources are released.
        if (!Committed && !RolledBack && Flags == TransactionFlags.ReadWrite)
        {
            Rollback();
        }
        _disposed |= TxState.Disposed;
        // Return the page locator to the persistent context for reuse.
        PersistentContext.FreePageLocator(_pageLocator);
    }
    finally
    {
        // Cleanup must run even when Rollback() throws.
        _env.TransactionCompleted(this);
        foreach (var pagerState in _pagerStates)
        {
            pagerState.Release();
        }
        if (JournalFiles != null)
        {
            foreach (var journalFile in JournalFiles)
            {
                journalFile.Release();
            }
        }
        _root?.Dispose();
        _freeSpaceTree?.Dispose();
        // Unsubscribe before (possibly) disposing the allocator so a late
        // allocation failure cannot call back into this dead transaction.
        _allocator.AllocationFailed -= MarkTransactionAsFailed;
        if (_disposeAllocator)
        {
            _allocator.Dispose();
        }
        OnDispose?.Invoke(this);
    }
}
// Disposes a transaction: rolls back an uncommitted read-write tx, resets the
// environment's write-transaction pool, then releases pager states, trees and
// (optionally) the allocator, firing OnDispose last. Idempotent via _disposed.
// NOTE(review): unlike the flag-based Dispose variant in this file, cleanup
// here is NOT inside a finally — if Rollback() throws, the resources below are
// not released. Presumably intentional (environment is considered corrupt at
// that point) — confirm.
public void Dispose()
{
    if (_disposed)
    {
        return;
    }
    if (!Committed && !RolledBack && Flags == TransactionFlags.ReadWrite)
    {
        Rollback();
    }
    _disposed = true;
    if (Flags == TransactionFlags.ReadWrite)
    {
        // Hand the pooled write-transaction state back for reuse.
        _env.WriteTransactionPool.Reset();
    }
    _env.TransactionCompleted(this);
    foreach (var pagerState in _pagerStates)
    {
        pagerState.Release();
    }
    _root?.Dispose();
    _freeSpaceTree?.Dispose();
    if (_disposeAllocator)
    {
        _allocator.Dispose();
    }
    // Notify subscribers only after all resources are released.
    OnDispose?.Invoke(this);
}
// Drops all tracked buffers and disposes the allocator that backed them.
// Buffers are cleared first so no entry outlives its backing allocator.
// NOTE(review): after this call the instance presumably must not allocate
// again — confirm against callers.
public void Cleanup()
{
    _buffers.Clear();
    _allocator.Dispose();
}
// Releases scratch buffers that are no longer needed. First computes the
// highest transaction id after which old scratches become free, then bumps the
// environment's read-transaction id with cheap dummy write transactions until
// that point is passed, and finally removes inactive scratches under a write
// transaction (which serializes access to _recycleArea/_scratchBuffers).
// Best-effort: timeouts, disk-full and allocation failures abort quietly.
public void Cleanup()
{
    // Nothing to reclaim: no recycled files and only the current scratch exists.
    if (_recycleArea.Count == 0 && _scratchBuffers.Count == 1)
    {
        return;
    }
    long txIdAllowingToReleaseOldScratches = -1;
    // ReSharper disable once LoopCanBeConvertedToQuery
    foreach (var scratchBufferItem in _scratchBuffers)
    {
        if (scratchBufferItem.Value == _current)
        {
            continue;
        }
        txIdAllowingToReleaseOldScratches = Math.Max(txIdAllowingToReleaseOldScratches,
            scratchBufferItem.Value.File.TxIdAfterWhichLatestFreePagesBecomeAvailable);
    }
    ByteStringContext byteStringContext;
    try
    {
        byteStringContext = new ByteStringContext(SharedMultipleUseFlag.None);
    }
    catch (Exception e) when (e is OutOfMemoryException || e is EarlyOutOfMemoryException)
    {
        // Cleanup is opportunistic — under memory pressure just skip it.
        return;
    }
    try
    {
        while (_env.CurrentReadTransactionId <= txIdAllowingToReleaseOldScratches)
        {
            // we've just flushed and had no more writes after that, let us bump id of next read transactions to ensure
            // that nobody will attempt to read old scratches so we will be able to release more files
            try
            {
                using (var tx = _env.NewLowLevelTransaction(new TransactionPersistentContext(),
                    TransactionFlags.ReadWrite, timeout: TimeSpan.FromMilliseconds(500), context: byteStringContext))
                {
                    // Touch page 0 so the commit is not a no-op and advances the tx id.
                    tx.ModifyPage(0);
                    tx.Commit();
                }
            }
            catch (TimeoutException)
            {
                break;
            }
            catch (DiskFullException)
            {
                break;
            }
        }
        // we need to ensure that no access to _recycleArea and _scratchBuffers will take place in the same time
        // and only methods that access this are used within write transaction
        try
        {
            using (_env.WriteTransaction(context: byteStringContext))
            {
                RemoveInactiveScratches(_current);
                RemoveInactiveRecycledScratches();
            }
        }
        catch (TimeoutException)
        {
            // Could not acquire the write tx in time — try again on a later Cleanup.
        }
    }
    finally
    {
        byteStringContext.Dispose();
    }
}
// Releases scratch buffers that are no longer needed (newer variant of the
// sibling Cleanup above). Same two phases — bump the read-transaction id past
// the point where old scratches become free, then remove inactive scratches
// under a write transaction — but additionally blocks NEW transactions while
// the pager-states cache of all scratches is rebuilt, since readers consume
// that cache via GetPagerStatesOfAllScratches(). Best-effort throughout.
public void Cleanup()
{
    // Nothing to reclaim: no recycled files and only the current scratch exists.
    if (_recycleArea.Count == 0 && _scratchBuffers.Count == 1)
    {
        return;
    }
    long txIdAllowingToReleaseOldScratches = -1;
    // ReSharper disable once LoopCanBeConvertedToQuery
    foreach (var scratchBufferItem in _scratchBuffers)
    {
        if (scratchBufferItem.Value == _current)
        {
            continue;
        }
        txIdAllowingToReleaseOldScratches = Math.Max(txIdAllowingToReleaseOldScratches,
            scratchBufferItem.Value.File.TxIdAfterWhichLatestFreePagesBecomeAvailable);
    }
    ByteStringContext byteStringContext;
    try
    {
        byteStringContext = new ByteStringContext(SharedMultipleUseFlag.None);
    }
    catch (Exception e) when (e is OutOfMemoryException || e is EarlyOutOfMemoryException)
    {
        // Cleanup is opportunistic — under memory pressure just skip it.
        return;
    }
    try
    {
        while (_env.CurrentReadTransactionId <= txIdAllowingToReleaseOldScratches)
        {
            // we've just flushed and had no more writes after that, let us bump id of next read transactions to ensure
            // that nobody will attempt to read old scratches so we will be able to release more files
            try
            {
                using (var tx = _env.NewLowLevelTransaction(new TransactionPersistentContext(),
                    TransactionFlags.ReadWrite, timeout: TimeSpan.FromMilliseconds(500), context: byteStringContext))
                {
                    // Touch page 0 so the commit is not a no-op and advances the tx id.
                    tx.ModifyPage(0);
                    tx.Commit();
                }
            }
            catch (TimeoutException)
            {
                break;
            }
            catch (DiskFullException)
            {
                break;
            }
        }
        IDisposable exitPreventNewTransactions = null;
        try
        {
            // we need to ensure that no access to _recycleArea and _scratchBuffers will take place in the same time
            // and only methods that access this are used within write transaction
            using (_env.WriteTransaction())
            {
                // additionally we must not allow to start any transaction (even read one) to start because it uses GetPagerStatesOfAllScratches() which
                // returns _pagerStatesAllScratchesCache that we're updating here
                if (_env.TryPreventNewTransactions(TimeSpan.Zero, out exitPreventNewTransactions))
                {
                    var removedInactive = RemoveInactiveScratches(_current,
                        updateCacheBeforeDisposingScratch: false); // no need to update cache because we're going to do it here anyway
                    var removedInactiveRecycled = RemoveInactiveRecycledScratches();
                    if (_logger.IsInfoEnabled)
                    {
                        _logger.Info(
                            $"Cleanup of {nameof(ScratchBufferPool)} removed: {removedInactive} inactive scratches and {removedInactiveRecycled} inactive from the recycle area");
                    }
                    _forTestingPurposes?.ActionToCallDuringCleanupRightAfterRemovingInactiveScratches?.Invoke();
                    UpdateCacheForPagerStatesOfAllScratches(); // it's going to be called by Rollback() of the write tx but let's call it explicitly so we can easily find this usage
                }
            }
        }
        catch (TimeoutException)
        {
        }
        catch (DiskFullException)
        {
        }
        finally
        {
            // Always re-allow new transactions, even if removal failed mid-way.
            exitPreventNewTransactions?.Dispose();
        }
    }
    finally
    {
        byteStringContext.Dispose();
    }
}
// Releases both allocation scopes held by this instance, first then last.
// NOTE(review): not idempotent — presumably the owner calls this exactly once.
public void Dispose()
{
    _firstScope.Dispose();
    _lastScope.Dispose();
}