/// <summary>
/// Queues an add operation for a structured <paramref name="value"/> under <paramref name="key"/>
/// in the tree named <paramref name="treeName"/>.
/// </summary>
/// <param name="key">Key to store the value under.</param>
/// <param name="value">Structured value to store; must not be null.</param>
/// <param name="treeName">Target tree name.</param>
/// <param name="version">Optional expected version for optimistic concurrency.</param>
/// <param name="shouldIgnoreConcurrencyExceptions">
/// When true, a <see cref="ConcurrencyException"/> raised while executing this operation is swallowed.
/// </param>
/// <exception cref="ArgumentNullException"><paramref name="value"/> is null.</exception>
public void AddStruct(Slice key, IStructure value, string treeName, ushort? version = null, bool shouldIgnoreConcurrencyExceptions = false)
{
    // Consistency fix: validate inputs the same way the other Add overloads do.
    AssertValidTreeName(treeName);
    if (value == null)
    {
        throw new ArgumentNullException(nameof(value));
    }

    var batchOperation = BatchOperation.Add(key, value, version, treeName);
    if (shouldIgnoreConcurrencyExceptions)
    {
        batchOperation.SetIgnoreExceptionOnExecution<ConcurrencyException>();
    }

    AddOperation(batchOperation);
}
/// <summary>
/// Queues an add operation for a slice <paramref name="value"/> under <paramref name="key"/>
/// in the tree named <paramref name="treeName"/>.
/// </summary>
/// <param name="key">Key to store the value under.</param>
/// <param name="value">Value to store; must not be null.</param>
/// <param name="treeName">Target tree name.</param>
/// <param name="version">Optional expected version for optimistic concurrency.</param>
/// <param name="shouldIgnoreConcurrencyExceptions">
/// When true, a <see cref="ConcurrencyException"/> raised while executing this operation is swallowed.
/// </param>
/// <exception cref="ArgumentNullException"><paramref name="value"/> is null.</exception>
public void Add(Slice key, Slice value, string treeName, ushort? version = null, bool shouldIgnoreConcurrencyExceptions = false)
{
    AssertValidTreeName(treeName);
    if (value == null)
    {
        // nameof keeps the parameter name refactor-safe (was the magic string "value").
        throw new ArgumentNullException(nameof(value));
    }

    var batchOperation = BatchOperation.Add(key, value, version, treeName);
    if (shouldIgnoreConcurrencyExceptions)
    {
        batchOperation.SetIgnoreExceptionOnExecution<ConcurrencyException>();
    }

    AddOperation(batchOperation);
}
/// <summary>
/// Queues an add operation for a stream <paramref name="value"/> under <paramref name="key"/>
/// in the tree named <paramref name="treeName"/>.
/// </summary>
/// <param name="key">Key to store the value under.</param>
/// <param name="value">Stream to store; must not be null and must be at most 2GB long.</param>
/// <param name="treeName">Target tree name.</param>
/// <param name="version">Optional expected version for optimistic concurrency.</param>
/// <param name="shouldIgnoreConcurrencyExceptions">
/// When true, a <see cref="ConcurrencyException"/> raised while executing this operation is swallowed.
/// </param>
/// <exception cref="ArgumentNullException"><paramref name="value"/> is null.</exception>
/// <exception cref="ArgumentException"><paramref name="value"/> is longer than <see cref="int.MaxValue"/> bytes.</exception>
public void Add(Slice key, Stream value, string treeName, ushort? version = null, bool shouldIgnoreConcurrencyExceptions = false)
{
    AssertValidTreeName(treeName);
    if (value == null)
    {
        // nameof keeps the parameter name refactor-safe (was the magic string "value").
        throw new ArgumentNullException(nameof(value));
    }
    if (value.Length > int.MaxValue)
    {
        throw new ArgumentException("Cannot add a value that is over 2GB in size", nameof(value));
    }

    var batchOperation = BatchOperation.Add(key, value, version, treeName);
    if (shouldIgnoreConcurrencyExceptions)
    {
        batchOperation.SetIgnoreExceptionOnExecution<ConcurrencyException>();
    }

    AddOperation(batchOperation);
}
/// <summary>
/// Persists the transactional state to table storage as a sequence of batched operations:
/// deletes aborted prepare records, writes non-obsolete prepare records, writes the key
/// (metadata + committed position), then deletes obsolete prepare records, and finally flushes.
/// The batches must commit in order but need not be a single atomic batch.
/// </summary>
/// <param name="expectedETag">ETag the caller expects the key row to have; mismatch aborts the store.</param>
/// <param name="metadata">Transaction metadata serialized into the key row.</param>
/// <param name="statesToPrepare">Pending states to persist; may be null.</param>
/// <param name="commitUpTo">If set, advances the committed sequence id up to this value.</param>
/// <param name="abortAfter">If set, prepare records with a sequence id greater than this are deleted.</param>
/// <returns>The key row's ETag after the store completes.</returns>
/// <exception cref="ArgumentException"><paramref name="expectedETag"/> does not match the stored ETag.</exception>
public async Task<string> Store(string expectedETag, TransactionalStateMetaData metadata, List<PendingTransactionState<TState>> statesToPrepare, long? commitUpTo, long? abortAfter)
{
    var keyETag = key.ETag.ToString();
    if ((!string.IsNullOrWhiteSpace(keyETag) || !string.IsNullOrWhiteSpace(expectedETag)) && keyETag != expectedETag)
    {
        // BUG FIX: the arguments were swapped — ArgumentException takes (message, paramName),
        // so the original reported "expectedETag" as the message text.
        throw new ArgumentException("Etag does not match", nameof(expectedETag));
    }

    // assemble all storage operations into a single batch
    // these operations must commit in sequence, but not necessarily atomically
    // so we can split this up if needed
    var batchOperation = new BatchOperation(logger, key, table);

    // first, clean up aborted records (highest sequence ids first, since `states` is sorted by key)
    if (abortAfter.HasValue && states.Count != 0)
    {
        while (states.Count > 0 && states[states.Count - 1].Key > abortAfter)
        {
            var entity = states[states.Count - 1].Value;
            await batchOperation.Add(new TableTransactionAction(TableTransactionActionType.Delete, entity.Entity, entity.ETag)).ConfigureAwait(false);
            key.ETag = batchOperation.KeyETag;
            states.RemoveAt(states.Count - 1);
            if (logger.IsEnabled(LogLevel.Trace))
            {
                logger.LogTrace($"{partition}.{entity.RowKey} Delete {entity.TransactionId}");
            }
        }
    }

    // second, persist non-obsolete prepare records
    var obsoleteBefore = commitUpTo.HasValue ? commitUpTo.Value : key.CommittedSequenceId;
    if (statesToPrepare != null)
    {
        foreach (var s in statesToPrepare)
        {
            if (s.SequenceId >= obsoleteBefore)
            {
                if (FindState(s.SequenceId, out var pos))
                {
                    // overwrite with new pending state
                    StateEntity existing = states[pos].Value;
                    existing.TransactionId = s.TransactionId;
                    existing.TransactionTimestamp = s.TimeStamp;
                    existing.TransactionManager = JsonConvert.SerializeObject(s.TransactionManager, this.jsonSettings);
                    existing.SetState(s.State, this.jsonSettings);
                    await batchOperation.Add(new TableTransactionAction(TableTransactionActionType.UpdateReplace, existing.Entity, existing.ETag)).ConfigureAwait(false);
                    key.ETag = batchOperation.KeyETag;
                    if (logger.IsEnabled(LogLevel.Trace))
                    {
                        logger.LogTrace($"{partition}.{existing.RowKey} Update {existing.TransactionId}");
                    }
                }
                else
                {
                    var entity = StateEntity.Create(this.jsonSettings, this.partition, s);
                    await batchOperation.Add(new TableTransactionAction(TableTransactionActionType.Add, entity.Entity)).ConfigureAwait(false);
                    key.ETag = batchOperation.KeyETag;
                    states.Insert(pos, new KeyValuePair<long, StateEntity>(s.SequenceId, entity));
                    if (logger.IsEnabled(LogLevel.Trace))
                    {
                        logger.LogTrace($"{partition}.{entity.RowKey} Insert {entity.TransactionId}");
                    }
                }
            }
        }
    }

    // third, persist metadata and commit position
    key.Metadata = JsonConvert.SerializeObject(metadata, this.jsonSettings);
    if (commitUpTo.HasValue && commitUpTo.Value > key.CommittedSequenceId)
    {
        key.CommittedSequenceId = commitUpTo.Value;
    }
    // an empty ETag means the key row has never been written, so it must be inserted rather than replaced
    if (string.IsNullOrEmpty(this.key.ETag.ToString()))
    {
        await batchOperation.Add(new TableTransactionAction(TableTransactionActionType.Add, key)).ConfigureAwait(false);
        key.ETag = batchOperation.KeyETag;
        if (logger.IsEnabled(LogLevel.Trace))
        {
            logger.LogTrace($"{partition}.{KeyEntity.RK} Insert. v{this.key.CommittedSequenceId}, {metadata.CommitRecords.Count}c");
        }
    }
    else
    {
        await batchOperation.Add(new TableTransactionAction(TableTransactionActionType.UpdateReplace, key, key.ETag)).ConfigureAwait(false);
        key.ETag = batchOperation.KeyETag;
        if (logger.IsEnabled(LogLevel.Trace))
        {
            logger.LogTrace($"{partition}.{KeyEntity.RK} Update. v{this.key.CommittedSequenceId}, {metadata.CommitRecords.Count}c");
        }
    }

    // fourth, remove obsolete records (everything strictly below obsoleteBefore)
    if (states.Count > 0 && states[0].Key < obsoleteBefore)
    {
        FindState(obsoleteBefore, out var pos);
        for (int i = 0; i < pos; i++)
        {
            await batchOperation.Add(new TableTransactionAction(TableTransactionActionType.Delete, states[i].Value.Entity, states[i].Value.ETag)).ConfigureAwait(false);
            key.ETag = batchOperation.KeyETag;
            if (logger.IsEnabled(LogLevel.Trace))
            {
                logger.LogTrace($"{partition}.{states[i].Value.RowKey} Delete {states[i].Value.TransactionId}");
            }
        }
        states.RemoveRange(0, pos);
    }

    await batchOperation.Flush().ConfigureAwait(false);

    if (logger.IsEnabled(LogLevel.Debug))
    {
        logger.LogDebug($"{partition} Stored v{this.key.CommittedSequenceId} eTag={key.ETag}");
    }

    return key.ETag.ToString();
}
/// <summary>
/// Enqueues this command into the pending command list and immediately returns a
/// <see cref="Status.Queued"/> response wrapped in a completed task.
/// </summary>
public override Task<T> Execute()
{
    _commands.Add(this);
    var queuedResponse = (T)CommandResponse.FromStatus(Status.Queued);
    return Task.FromResult(queuedResponse);
}
/// <summary>
/// Hands the log entry off to the batch processor rather than writing it directly.
/// </summary>
protected override void ProtectedLog(ILogInformation info) => _batchProcessor.Add(info);