private void NotifyHighDirtyMemoryFailure(List<MergedTransactionCommand> pendingOps, HighDirtyMemoryException exception)
        {
            // Fail every operation already merged into the current transaction
            // and hand them back to their callers on the thread pool.
            for (var i = 0; i < pendingOps.Count; i++)
            {
                pendingOps[i].Exception = exception;
            }

            NotifyOnThreadPool(pendingOps);

            // Drain whatever is still waiting in the queue - those operations
            // are rejected with the same exception and notified the same way.
            var rejected = GetBufferForPendingOps();

            while (_operations.TryDequeue(out var queuedOp))
            {
                queuedOp.Exception = exception;
                rejected.Add(queuedOp);
            }

            NotifyOnThreadPool(rejected);
        }
        /// <summary>
        /// Executes queued merged operations inside the current write transaction until the
        /// queue is drained or a limit (elapsed time, transaction size, high dirty memory,
        /// or the previous async commit finishing) forces the transaction to close.
        /// </summary>
        /// <param name="pendingOps">Accumulates the operations executed in this pass; they are completed by the caller.</param>
        /// <param name="context">Operation context that holds the currently open write transaction.</param>
        /// <param name="previousOperation">The previous (possibly still running) async commit task, or null for a sync pass.</param>
        /// <param name="meter">Performance metrics accumulator for this merge pass.</param>
        /// <returns>Status describing how the accumulated pending operations should be completed.</returns>
        private PendingOperations ExecutePendingOperationsInTransaction(
            List<MergedTransactionCommand> pendingOps,
            DocumentsOperationContext context,
            Task previousOperation, ref PerformanceMetrics.DurationMeasurement meter)
        {
            _alreadyListeningToPreviousOperationEnd = false;
            var percentageFromPhysicalMem = _parent.Configuration.Memory.TemporaryDirtyMemoryAllowedPercentage;

            context.TransactionMarkerOffset = 1;  // ensure that we are consistent here and don't use old values
            var sp = Stopwatch.StartNew();

            do
            {
                // RavenDB-7732 - Even if we merged multiple separate operations into
                // a single transaction in Voron, we're still going to have a separate
                // tx marker for them for the purpose of replication, to avoid creating
                // overly large replication batches.
                context.TransactionMarkerOffset++;

                if (TryGetNextOperation(previousOperation, out MergedTransactionCommand op, ref meter) == false)
                {
                    break;
                }

                pendingOps.Add(op);

                var llt = context.Transaction.InnerTransaction.LowLevelTransaction;

                if (_parent.Configuration.Memory.EnableHighTemporaryDirtyMemoryUse)
                {
                    var now = _parent.Time.GetUtcNow();
                    if (now - _lastHighDirtyMemCheck > _timeToCheckHighDirtyMemory.AsTimeSpan) // we do not need to test scratch dirty mem every write
                    {
                        if (MemoryInformation.IsHighDirtyMemory(percentageFromPhysicalMem, out var details))
                        {
                            var highDirtyMemory = new HighDirtyMemoryException(
                                $"Operation was cancelled by the transaction merger for transaction #{llt.Id} due to high dirty memory in scratch files." +
                                $" This might be caused by a slow IO storage. Current memory usage: {details}");

                            // Fail everything merged so far and drain the queue - this is
                            // exactly what the shared helper does, so call it instead of
                            // duplicating its fail-notify-drain logic inline.
                            NotifyHighDirtyMemoryFailure(pendingOps, highDirtyMemory);
                            break;
                        }
                        _lastHighDirtyMemCheck = now; // reset timer for next check only if no errors (otherwise check every single write until back to normal)
                    }
                }

                meter.IncrementCounter(1);
                meter.IncrementCommands(op.Execute(context, _recording.State));

                // Estimate how much this transaction has modified so far: data pages plus
                // encryption buffers (zero when encryption is off).
                var modifiedSize = llt.NumberOfModifiedPages * Constants.Storage.PageSize;
                modifiedSize += llt.TotalEncryptionBufferSize.GetValue(SizeUnit.Bytes);

                var canCloseCurrentTx = previousOperation == null || previousOperation.IsCompleted;
                if (canCloseCurrentTx || _parent.Is32Bits)
                {
                    if (_operations.IsEmpty)
                    {
                        break; // nothing remaining to do, let's us close this work
                    }
                    if (sp.ElapsedMilliseconds > _maxTimeToWaitForPreviousTxInMs)
                    {
                        break; // too much time
                    }
                    if (modifiedSize > _maxTxSizeInBytes)
                    {
                        break; // transaction is too big, let's clean it
                    }
                    // even though we can close the tx, we choose to keep it a bit longer
                    // we want to keep processing operations until we clear the queue, time / size
                    // limits are reached
                    continue;
                }

                // if I can't close the tx, this means that the previous async operation is still in progress
                // and there are incoming requests coming in. We'll accept them, to a certain limit
                if (modifiedSize < _maxTxSizeInBytes)
                {
                    continue; // we can still process requests at this time, so let's do that...
                }
                UnlikelyRejectOperations(previousOperation, sp, llt, modifiedSize);
                break;
            } while (true);

            var status = GetPendingOperationsStatus(context, pendingOps.Count == 0);

            if (_log.IsInfoEnabled)
            {
                var opType = previousOperation == null ? string.Empty : "(async) ";
                _log.Info($"Merged {pendingOps.Count:#,#;;0} operations in {sp.Elapsed} {opType}with {_operations.Count:#,#;;0} operations remaining. Status: {status}");
            }
            return status;
        }