/// <summary>
/// Slow path used by <c>TryGetNextOperation</c>: blocks until either a new merged
/// command arrives in the queue or the previous async transaction commit completes.
/// Returns true with a dequeued command in <paramref name="op"/>, or false
/// (op = null) once the previous operation has finished, signalling the caller to
/// stop merging work into the current transaction.
/// </summary>
private bool UnlikelyWaitForNextOperationOrPreviousTransactionComplete(Task previousOperation, out MergedTransactionCommand op, ref PerformanceMetrics.DurationMeasurement meter)
{
    if (_alreadyListeningToPreviousOperationEnd == false)
    {
        // Register the wake-up only once per merge cycle: the continuation sets the
        // wait handle when the previous async commit finishes. If it has already
        // finished, set the handle immediately so the wait below returns at once.
        _alreadyListeningToPreviousOperationEnd = true;
        if (previousOperation.IsCompleted)
        {
            _waitHandle.Set();
        }
        else
        {
            previousOperation.ContinueWith(_ => _waitHandle.Set(), _shutdown);
        }
    }
    while (true)
    {
        try
        {
            meter.MarkInternalWindowStart();
            // Wait for either a new operation (producer sets the handle) or the
            // previous-commit continuation registered above; observes shutdown.
            _waitHandle.Wait(_shutdown);
            _waitHandle.Reset();
            // Deliberately checked BEFORE dequeuing: once the previous commit is
            // done we stop pulling work into this tx even if the queue is non-empty.
            if (previousOperation.IsCompleted)
            {
                op = null;
                return(false);
            }
            if (_operations.TryDequeue(out op))
            {
                return(true);
            }
            // Handle was set but nothing is available yet - loop and wait again.
        }
        finally
        {
            meter.MarkInternalWindowEnd();
        }
    }
}
/// <summary>
/// Attempts to fetch the next merged command to execute. Dequeues immediately when
/// one is available; otherwise, if a previous async commit is still in flight,
/// blocks on the slow path until new work arrives or that commit completes.
/// </summary>
private bool TryGetNextOperation(Task previousOperation, out MergedTransactionCommand op, ref PerformanceMetrics.DurationMeasurement meter)
{
    // Fast path: a command is already waiting in the queue.
    if (_operations.TryDequeue(out op))
        return true;

    // Queue is empty. With no in-flight async commit to overlap with, there is
    // nothing worth waiting for - report that no more work is available.
    var previousStillRunning = previousOperation != null && previousOperation.IsCompleted == false;
    if (previousStillRunning == false)
        return false;

    // Otherwise block until either a new command shows up or the previous commit ends.
    return UnlikelyWaitForNextOperationOrPreviousTransactionComplete(previousOperation, out op, ref meter);
}
/// <summary>
/// Executes queued merged commands inside the currently open write transaction
/// until a stop condition is hit: no more work, the previous async commit
/// completed, the time budget elapsed, or (on 32-bit / forced 32-bit pager) the
/// transaction has modified too much memory.
/// </summary>
/// <returns>The pending-operations status the caller uses to decide how to commit.</returns>
private PendingOperations ExecutePendingOperationsInTransaction(
    List<MergedTransactionCommand> pendingOps,
    DocumentsOperationContext context,
    Task previousOperation,
    ref PerformanceMetrics.DurationMeasurement meter)
{
    _alreadyListeningToPreviousOperationEnd = false;
    // Fix: reset the marker so we don't carry over a stale offset from a previous
    // merge cycle - kept consistent with the other versions of this method, which
    // all perform this reset ("ensure that we are consistent here and don't use old values").
    context.TransactionMarkerOffset = 1;
    var sp = Stopwatch.StartNew();
    do
    {
        // RavenDB-7732 - Even if we merged multiple separate operations into
        // a single transaction in Voron, we're still going to have a separate
        // tx marker for them for the purpose of replication, to avoid creating
        // overly large replication batches.
        context.TransactionMarkerOffset++;

        if (TryGetNextOperation(previousOperation, out MergedTransactionCommand op, ref meter) == false)
            break;

        pendingOps.Add(op);
        meter.IncrementCounter(1);
        meter.IncreamentCommands(op.Execute(context));

        // Once the previous async commit has landed, stop merging so we can commit
        // what we have accumulated.
        if (previousOperation != null && previousOperation.IsCompleted)
        {
            if (_log.IsInfoEnabled)
            {
                _log.Info(
                    $"Stopping merged operations because previous transaction async commit completed. Took {sp.Elapsed} with {pendingOps.Count} operations and {_operations.Count} remaining operations");
            }
            return GetPendingOperationsStatus(context);
        }

        if (sp.ElapsedMilliseconds > MaxTimeToWait)
        {
            // While a previous async commit is still running we keep merging past
            // the time budget; with nothing in flight we stop here instead.
            if (previousOperation != null)
                continue;
            if (_log.IsInfoEnabled)
            {
                _log.Info($"Stopping merged operations because {sp.Elapsed} passed {pendingOps.Count} operations and {_operations.Count} remaining operations");
            }
            return GetPendingOperationsStatus(context);
        }

        if (IntPtr.Size == sizeof(int) || _parent.Configuration.Storage.ForceUsing32BitsPager)
        {
            // we need to be sure that we don't use up too much virtual space
            var llt = context.Transaction.InnerTransaction.LowLevelTransaction;
            var modifiedSize = llt.NumberOfModifiedPages * Constants.Storage.PageSize;
            if (modifiedSize > 4 * Constants.Size.Megabyte)
                return GetPendingOperationsStatus(context);
        }
    } while (true);

    if (_log.IsInfoEnabled)
    {
        _log.Info($"Merged {pendingOps.Count} operations in {sp.Elapsed} and there is no more work");
    }
    if (context.Transaction.ModifiedSystemDocuments)
        return PendingOperations.ModifiedSystemDocuments;
    return GetPendingOperationsStatus(context, pendingOps.Count == 0);
}
/// <summary>
/// Executes queued merged commands inside the currently open write transaction,
/// aborting with <c>HighDirtyMemoryException</c> when scratch-file dirty memory is
/// too high, and otherwise merging until the queue drains, the time budget is
/// exceeded, or the transaction grows past the size limit.
/// </summary>
/// <returns>The pending-operations status the caller uses to decide how to commit.</returns>
private PendingOperations ExecutePendingOperationsInTransaction(
    List<MergedTransactionCommand> executedOps,
    DocumentsOperationContext context,
    Task previousOperation,
    ref PerformanceMetrics.DurationMeasurement meter)
{
    _alreadyListeningToPreviousOperationEnd = false;
    context.TransactionMarkerOffset = 1; // ensure that we are consistent here and don't use old values
    var sp = Stopwatch.StartNew();
    do
    {
        // RavenDB-7732 - Even if we merged multiple separate operations into
        // a single transaction in Voron, we're still going to have a separate
        // tx marker for them for the purpose of replication, to avoid creating
        // overly large replication batches.
        context.TransactionMarkerOffset++;

        if (TryGetNextOperation(previousOperation, out MergedTransactionCommand op, ref meter) == false)
            break;

        executedOps.Add(op);
        var llt = context.Transaction.InnerTransaction.LowLevelTransaction;

        var dirtyMemoryState = LowMemoryNotification.Instance.DirtyMemoryState;
        if (dirtyMemoryState.IsHighDirty)
        {
            var now = _parent.Time.GetUtcNow();
            if (now - _lastHighDirtyMemCheck > _timeToCheckHighDirtyMemory.AsTimeSpan)
            {
                // we need to ask for a flush here
                GlobalFlushingBehavior.GlobalFlusher.Value?.MaybeFlushEnvironment(context.Environment);
                _lastHighDirtyMemCheck = now;
            }
            // Fix: the message literal below was broken across a physical line in the
            // original (a raw newline inside an interpolated string), which does not
            // compile - the fragments are rejoined into valid concatenated literals.
            throw new HighDirtyMemoryException(
                $"Operation was cancelled by the transaction merger for transaction #{llt.Id} due to high dirty memory in scratch files." +
                $" This might be caused by a slow IO storage. Current memory usage: " +
                $"Total Physical Memory: {MemoryInformation.TotalPhysicalMemory}, " +
                $"Total Scratch Allocated Memory: {new Size(dirtyMemoryState.TotalDirtyInBytes, SizeUnit.Bytes)} " +
                $"(which is above {_parent.Configuration.Memory.TemporaryDirtyMemoryAllowedPercentage * 100}%)");
        }

        meter.IncrementCounter(1);
        meter.IncrementCommands(op.Execute(context, _recording.State));

        if (op.UpdateAccessTime)
            _parent.LastAccessTime = _parent.Time.GetUtcNow();

        var modifiedSize = llt.NumberOfModifiedPages * Constants.Storage.PageSize;
        modifiedSize += llt.AdditionalMemoryUsageSize.GetValue(SizeUnit.Bytes);

        var canCloseCurrentTx = previousOperation == null || previousOperation.IsCompleted;
        if (canCloseCurrentTx || _parent.Is32Bits)
        {
            if (_operations.IsEmpty)
                break; // nothing remaining to do, lets us close this work
            if (sp.ElapsedMilliseconds > _maxTimeToWaitForPreviousTxInMs)
                break; // too much time
            if (modifiedSize > _maxTxSizeInBytes)
                break; // transaction is too big, let's clean it

            // even though we can close the tx, we choose to keep it a bit longer;
            // we want to keep processing operations until we clear the queue, or
            // the time / size limits are reached
            continue;
        }

        // if I can't close the tx, this means that the previous async operation is
        // still in progress and there are incoming requests coming in. We'll accept
        // them, to a certain limit
        if (modifiedSize < _maxTxSizeInBytes)
            continue; // we can still process requests at this time, so let's do that...

        UnlikelyRejectOperations(previousOperation, sp, llt, modifiedSize);
        break;
    } while (true);

    var currentOperationsCount = _operations.Count;
    var status = GetPendingOperationsStatus(context, currentOperationsCount == 0);
    if (_log.IsInfoEnabled)
    {
        var opType = previousOperation == null ? string.Empty : "(async) ";
        _log.Info($"Merged {executedOps.Count:#,#;;0} operations in {sp.Elapsed} {opType}with {currentOperationsCount:#,#;;0} operations remaining. Status: {status}");
    }
    return status;
}
/// <summary>
/// Executes queued merged commands inside the currently open write transaction,
/// merging until the queue drains, the time budget is exceeded, or the transaction
/// modifies more than the allowed number of bytes; oversized work while a previous
/// async commit is in flight is rejected via <c>UnlikelyRejectOperations</c>.
/// NOTE(review): several methods with this identical signature appear in this file -
/// presumably concatenated revisions; confirm which one is the live version.
/// </summary>
/// <returns>The pending-operations status the caller uses to decide how to commit.</returns>
private PendingOperations ExecutePendingOperationsInTransaction( List <MergedTransactionCommand> pendingOps, DocumentsOperationContext context, Task previousOperation, ref PerformanceMetrics.DurationMeasurement meter)
{
    _alreadyListeningToPreviousOperationEnd = false;
    context.TransactionMarkerOffset = 1; // ensure that we are consistent here and don't use old values
    var sp = Stopwatch.StartNew();
    do
    {
        // RavenDB-7732 - Even if we merged multiple separate operations into
        // a single transaction in Voron, we're still going to have a separate
        // tx marker for them for the purpose of replication, to avoid creating
        // overly large replication batches.
        context.TransactionMarkerOffset++;
        if (TryGetNextOperation(previousOperation, out MergedTransactionCommand op, ref meter) == false)
        {
            break;
        }
        pendingOps.Add(op);
        meter.IncrementCounter(1);
        meter.IncrementCommands(op.Execute(context));
        var llt = context.Transaction.InnerTransaction.LowLevelTransaction;
        var modifiedSize = llt.NumberOfModifiedPages * Constants.Storage.PageSize;
        var canCloseCurrentTx = previousOperation == null || previousOperation.IsCompleted;
        if (canCloseCurrentTx)
        {
            if (_operations.IsEmpty)
            {
                break; // nothing remaining to do, let's us close this work
            }
            if (sp.ElapsedMilliseconds > _maxTimeToWaitForPreviousTxInMs)
            {
                break; // too much time
            }
            if (modifiedSize > _maxTxSizeInBytes)
            {
                break; // transaction is too big, let's clean it
            }
            // even though we can close the tx, we choose to keep it a bit longer
            // we want to keep processing operations until we clear the queue, time / size
            // limits are reached
            continue;
        }
        // if I can't close the tx, this means that the previous async operation is still in progress
        // and there are incoming requests coming in. We'll accept them, to a certain limit
        if (modifiedSize < _maxTxSizeInBytes)
        {
            continue; // we can still process requests at this time, so let's do that...
        }
        UnlikelyRejectOperations(previousOperation, sp, llt, modifiedSize);
        break;
    } while (true);
    var status = GetPendingOperationsStatus(context, pendingOps.Count == 0);
    if (_log.IsInfoEnabled)
    {
        var opType = previousOperation == null ? string.Empty : "(async)";
        _log.Info($"Merged {pendingOps.Count:#,#;;0} operations in {sp.Elapsed} {opType} with {_operations.Count:#,#;;0} operations remaining. Status: {status}");
    }
    return(status);
}
/// <summary>
/// Executes queued merged commands inside the currently open write transaction.
/// When high-dirty-memory checking is enabled and dirty scratch memory is too high,
/// fails the already-executed and still-queued operations with a
/// <c>HighDirtyMemoryException</c> (notified on the thread pool) instead of throwing.
/// Otherwise merges until the queue drains, the time budget elapses, or the
/// transaction grows past the size limit.
/// </summary>
/// <returns>The pending-operations status the caller uses to decide how to commit.</returns>
private PendingOperations ExecutePendingOperationsInTransaction(
    List<MergedTransactionCommand> pendingOps,
    DocumentsOperationContext context,
    Task previousOperation,
    ref PerformanceMetrics.DurationMeasurement meter)
{
    _alreadyListeningToPreviousOperationEnd = false;
    var percentageFromPhysicalMem = _parent.Configuration.Memory.TemporaryDirtyMemoryAllowedPercentage;
    context.TransactionMarkerOffset = 1; // ensure that we are consistent here and don't use old values
    var sp = Stopwatch.StartNew();
    do
    {
        // RavenDB-7732 - Even if we merged multiple separate operations into
        // a single transaction in Voron, we're still going to have a separate
        // tx marker for them for the purpose of replication, to avoid creating
        // overly large replication batches.
        context.TransactionMarkerOffset++;

        if (TryGetNextOperation(previousOperation, out MergedTransactionCommand op, ref meter) == false)
            break;

        pendingOps.Add(op);
        var llt = context.Transaction.InnerTransaction.LowLevelTransaction;

        if (_parent.Configuration.Memory.EnableHighTemporaryDirtyMemoryUse)
        {
            var now = _parent.Time.GetUtcNow();
            if (now - _lastHighDirtyMemCheck > _timeToCheckHighDirtyMemory.AsTimeSpan) // we do not need to test scratch dirty mem every write
            {
                if (MemoryInformation.IsHighDirtyMemory(percentageFromPhysicalMem, out var details))
                {
                    // Fix: this message literal was broken across a physical line in the
                    // original (raw newline inside an interpolated string, which does not
                    // compile) - the fragments are rejoined here.
                    var highDirtyMemory = new HighDirtyMemoryException(
                        $"Operation was cancelled by the transaction merger for transaction #{llt.Id} due to high dirty memory in scratch files." +
                        $" This might be caused by a slow IO storage. Current memory usage: {details}");

                    // Fail everything executed in this merge cycle...
                    foreach (var pendingOp in pendingOps)
                    {
                        pendingOp.Exception = highDirtyMemory;
                    }
                    NotifyOnThreadPool(pendingOps);

                    // ...and everything still waiting in the queue.
                    var rejectedBuffer = GetBufferForPendingOps();
                    while (_operations.TryDequeue(out var operationToReject))
                    {
                        operationToReject.Exception = highDirtyMemory;
                        rejectedBuffer.Add(operationToReject);
                    }
                    NotifyOnThreadPool(rejectedBuffer);
                    break;
                }
                _lastHighDirtyMemCheck = now; // reset timer for next check only if no errors (otherwise check every single write until back to normal)
            }
        }

        meter.IncrementCounter(1);
        meter.IncrementCommands(op.Execute(context, _recording.State));

        var modifiedSize = llt.NumberOfModifiedPages * Constants.Storage.PageSize;
        modifiedSize += llt.TotalEncryptionBufferSize.GetValue(SizeUnit.Bytes);

        var canCloseCurrentTx = previousOperation == null || previousOperation.IsCompleted;
        if (canCloseCurrentTx || _parent.Is32Bits)
        {
            if (_operations.IsEmpty)
                break; // nothing remaining to do, lets us close this work
            if (sp.ElapsedMilliseconds > _maxTimeToWaitForPreviousTxInMs)
                break; // too much time
            if (modifiedSize > _maxTxSizeInBytes)
                break; // transaction is too big, let's clean it

            // even though we can close the tx, we choose to keep it a bit longer;
            // we want to keep processing operations until we clear the queue, or
            // the time / size limits are reached
            continue;
        }

        // if I can't close the tx, this means that the previous async operation is
        // still in progress and there are incoming requests coming in. We'll accept
        // them, to a certain limit
        if (modifiedSize < _maxTxSizeInBytes)
            continue; // we can still process requests at this time, so let's do that...

        UnlikelyRejectOperations(previousOperation, sp, llt, modifiedSize);
        break;
    } while (true);

    var status = GetPendingOperationsStatus(context, pendingOps.Count == 0);
    if (_log.IsInfoEnabled)
    {
        var opType = previousOperation == null ? string.Empty : "(async) ";
        _log.Info($"Merged {pendingOps.Count:#,#;;0} operations in {sp.Elapsed} {opType}with {_operations.Count:#,#;;0} operations remaining. Status: {status}");
    }
    return status;
}