// Opens an EventStore transaction on the given stream and records its id so
// other members can look it up by stream name later.
private async Task<EventStoreTransaction> StartTransaction(string stream, long expectedVersion)
{
    // Begin the transaction at the caller-supplied optimistic-concurrency version.
    var startedTransaction = await _eventStore.StartTransactionAsync(stream, expectedVersion);

    // NOTE(review): Dictionary.Add throws if a transaction is already tracked
    // for this stream — confirm callers never start two transactions per stream.
    _transactionDictionary.Add(stream, startedTransaction.TransactionId);

    return startedTransaction;
}
// Persists a batch of domain events to the stream, paging the writes through a
// transaction when the batch exceeds WriteBatchSize. Returns the stream's next
// expected version (-1 when there was nothing to write).
public async Task<StreamWriteResult> SaveEvents(StreamId streamId, long streamVersion, List<IDomainEvent> events)
{
    // Nothing to write: report the sentinel version -1.
    if (!events.Any())
    {
        return new StreamWriteResult(-1);
    }

    var commitId = Guid.NewGuid();

    // A stream version of 0 means the stream must not exist yet; otherwise the
    // store expects the 0-based version of the last persisted event.
    var expectedVersion = streamVersion == 0 ? ExpectedVersion.NoStream : streamVersion - 1;

    var serializedEvents = events.ConvertAll(domainEvent => ToEventData(commitId, domainEvent));

    // Small batches fit in a single append round-trip.
    if (serializedEvents.Count < WriteBatchSize)
    {
        var appendResult = await _connection.AppendToStreamAsync(streamId.ToString(), expectedVersion, serializedEvents);
        return new StreamWriteResult(appendResult.NextExpectedVersion);
    }

    // Larger batches are written page-by-page inside one transaction so the
    // whole batch commits atomically.
    using (var transaction = await _connection.StartTransactionAsync(streamId.ToString(), expectedVersion))
    {
        for (var offset = 0; offset < serializedEvents.Count; offset += WriteBatchSize)
        {
            await transaction.WriteAsync(serializedEvents.Skip(offset).Take(WriteBatchSize));
        }

        var commitResult = await transaction.CommitAsync();
        return new StreamWriteResult(commitResult.NextExpectedVersion);
    }
}
// Appends serialized events to the named stream, paging the writes through a
// transaction when the batch is larger than WritePageSize.
private async Task AppendEventsInternalAsync(string streamName, long expectedVersion, ICollection<EventData> events)
{
    using (Profiler.TraceMethod<GetEventStore>(nameof(AppendAsync)))
    {
        Guard.NotNullOrEmpty(streamName, nameof(streamName));
        Guard.NotNull(events, nameof(events));

        // No-op for an empty batch.
        if (events.Count == 0)
        {
            return;
        }

        var eventsToSave = events.Select(Formatter.Write).ToList();

        if (eventsToSave.Count < WritePageSize)
        {
            // Single round-trip for small batches.
            await connection.AppendToStreamAsync(GetStreamName(streamName), expectedVersion, eventsToSave);
            return;
        }

        // Page the writes inside one transaction so the batch commits atomically.
        using (var transaction = await connection.StartTransactionAsync(GetStreamName(streamName), expectedVersion))
        {
            var offset = 0;
            while (offset < eventsToSave.Count)
            {
                await transaction.WriteAsync(eventsToSave.Skip(offset).Take(WritePageSize));
                offset += WritePageSize;
            }

            await transaction.CommitAsync();
        }
    }
}
/// <summary>
/// Synchronous convenience wrapper over <c>StartTransactionAsync</c>.
/// </summary>
/// <remarks>
/// Fixed: the previous <c>task.Wait(); return task.Result;</c> pattern wraps any
/// failure in an <see cref="AggregateException"/>. <c>GetAwaiter().GetResult()</c>
/// blocks identically but rethrows the original exception.
/// </remarks>
public static EventStoreTransaction StartTransaction(this IEventStoreConnection con, string stream, int expectedVersion, UserCredentials userCredentials = null)
{
    return con.StartTransactionAsync(stream, expectedVersion, userCredentials)
              .GetAwaiter()
              .GetResult();
}
/// <summary>
/// Persists the given serialized events for aggregate <paramref name="id"/> to the event
/// store inside a single transaction and returns the corresponding committed-event records.
/// </summary>
/// <param name="id">Aggregate identity; its Value is used as the stream id.</param>
/// <param name="serializedEvents">Pre-serialized events; must be non-empty (Min() throws otherwise).</param>
/// <param name="cancellationToken">Accepted but not forwarded to the store calls.</param>
/// <exception cref="OptimisticConcurrencyException">
/// Raised when the store reports a wrong expected version.
/// </exception>
protected override async Task <IReadOnlyCollection <ICommittedDomainEvent> > CommitEventsAsync <TAggregate, TIdentity>(
    TIdentity id, IReadOnlyCollection <SerializedEvent> serializedEvents, CancellationToken cancellationToken)
{
    var aggregateName = typeof(TAggregate).Name;

    // Build the result records up front; they are returned unchanged once the write succeeds.
    var committedDomainEvents = serializedEvents
        .Select(e => new EventStoreEvent
        {
            AggregateSequenceNumber = e.AggregateSequenceNumber,
            Metadata = e.SerializedMetadata,
            AggregateId = id.Value,
            AggregateName = aggregateName,
            Data = e.SerializedData
        })
        .ToList();

    // Expected version derives from the lowest sequence number in the batch, floored at 0.
    // NOTE(review): assumes aggregate sequence numbers are 1-based — confirm against the caller.
    var expectedVersion = Math.Max(serializedEvents.Min(e => e.AggregateSequenceNumber) - 1, 0);

    var eventDatas = serializedEvents
        .Select(e =>
        {
            var guid = Guid.Parse(e.Metadata["guid"]);
            // Event type name encodes aggregate name, event name and event version.
            var eventType = string.Format("{0}.{1}.{2}", aggregateName, e.Metadata.EventName, e.Metadata.EventVersion);
            var data = Encoding.UTF8.GetBytes(e.SerializedData);
            var meta = Encoding.UTF8.GetBytes(e.SerializedMetadata);
            return(new EventData(guid, eventType, true, data, meta));
        })
        .ToList();

    try
    {
        // expectedVersion 0 means no prior events, i.e. the stream must not exist yet.
        using (var transaction = await _connection.StartTransactionAsync(
            id.Value, expectedVersion == 0 ? ExpectedVersion.NoStream : expectedVersion)
            .ConfigureAwait(false))
        {
            await transaction.WriteAsync(eventDatas).ConfigureAwait(false);
            var writeResult = await transaction.CommitAsync().ConfigureAwait(false);
            Log.Verbose(
                "Wrote aggregate {0} with version {1} ({2},{3})",
                aggregateName,
                writeResult.NextExpectedVersion - 1,
                writeResult.LogPosition.CommitPosition,
                writeResult.LogPosition.PreparePosition);
        }
    }
    catch (WrongExpectedVersionException e)
    {
        // Translate the store-specific concurrency failure into the domain exception callers expect.
        throw new OptimisticConcurrencyException(e.Message, e);
    }

    return(committedDomainEvents);
}
// Returns the open transaction for the stream, starting and caching a new one
// when none exists yet.
private EventStoreTransaction GetTransaction(string streamId, int expectedVersion)
{
    EventStoreTransaction cached;
    if (_transactions.TryGetValue(streamId, out cached))
    {
        return cached;
    }

    _logger.Debug(this, $"Starting new transaction for stream {streamId} at expected version {expectedVersion}");

    // NOTE: blocks on the async client call; this type exposes a synchronous API.
    var started = _eventStoreConnection.StartTransactionAsync(streamId, expectedVersion).Result;
    _transactions.Add(streamId, started);
    return started;
}
/// <summary>
/// Persists an aggregate's uncommitted events to Event Store, paging through a
/// transaction when the batch exceeds WritePageSize, then clears the aggregate.
/// </summary>
/// <remarks>
/// Fixed: was <c>async void</c>, which makes exceptions unobservable and the
/// operation un-awaitable; returning <see cref="Task"/> is source-compatible for
/// existing callers. The paging transaction is now disposed via <c>using</c>.
/// </remarks>
public async Task Save(IAggregate aggregate, Guid commitId, IDictionary<string, object> updateHeaders = null)
{
    // standard data for metadata portion of persisted event
    var commitHeaders = new Dictionary<string, object>
    {
        // handy tracking id
        { CommitIdHeader, commitId },
        // type of aggregate being persisted
        { AggregateClrTypeHeader, aggregate.GetType().AssemblyQualifiedName }
    };

    // add extra data to metadata portion of persisted event; on a key conflict
    // the caller-supplied value wins (First() keeps the earlier Concat element)
    commitHeaders = (updateHeaders ?? new Dictionary<string, object>())
        .Concat(commitHeaders)
        .GroupBy(d => d.Key)
        .ToDictionary(d => d.Key, d => d.First().Value);

    // stream name is created by func; by default agg type concatenated with agg id
    var streamName = _aggregateIdToStreamName(aggregate.GetType(), aggregate.Id);

    // all uncommitted events, converted to the shape GES expects
    var newEvents = aggregate.GetUncommittedEvents().Cast<object>().ToList();
    var eventsToSave = newEvents.Select(e => ToEventData(Guid.NewGuid(), e, commitHeaders)).ToList();

    // expected version of the agg root in the store, for concurrency-conflict detection
    var originalVersion = aggregate.Version - newEvents.Count;
    var expectedVersion = originalVersion == 0 ? ExpectedVersion.NoStream : originalVersion - 1;

    if (eventsToSave.Count < WritePageSize)
    {
        // small enough for a single append call
        await _eventStoreConnection.AppendToStreamAsync(streamName, expectedVersion, eventsToSave);
    }
    else
    {
        // batch the writes inside one transaction, disposed deterministically
        using (var transaction = await _eventStoreConnection.StartTransactionAsync(streamName, expectedVersion))
        {
            var position = 0;
            while (position < eventsToSave.Count)
            {
                var pageEvents = eventsToSave.Skip(position).Take(WritePageSize);
                await transaction.WriteAsync(pageEvents);
                position += WritePageSize;
            }
            await transaction.CommitAsync();
        }
    }

    aggregate.ClearUncommittedEvents();
}
// Writes the event-sourced entity's events in a single transaction. When
// ignoreVersionCheck is set, the optimistic-concurrency check is skipped.
public async Task SaveAsync(T eventSourced, string correlationId, bool ignoreVersionCheck)
{
    // Guarantee incremental versions: the stream is expected to be at the
    // version the entity had before these events were raised.
    var expectedVersion = ignoreVersionCheck
        ? ExpectedVersion.Any
        : eventSourced.Version - eventSourced.Events.Count();

    var serialized = eventSourced.Events.Select(e => Serialize(e, correlationId));

    using (var transaction = await _eventStore.StartTransactionAsync(GetEventStoreStream(eventSourced.Id), expectedVersion))
    {
        await transaction.WriteAsync(serialized);
        await transaction.CommitAsync();
    }
}
// Flushes each tracked aggregate's pending changes in its own transaction,
// then gives the snapshotter a chance to snapshot the aggregate.
private async Task SaveAggregatesEvents()
{
    foreach (var trackedAggregate in _trackedAggregates)
    {
        var streamName = StreamNameGenerator.Generate(trackedAggregate);
        using (var transaction = await _context.StartTransactionAsync(streamName, trackedAggregate.CurrentVersion))
        {
            await transaction.WriteAsync(trackedAggregate.GetUnCommittedChanges().ToEventData());
            await transaction.CommitAsync();
            await _snapshotter.TakeSnapshotIfNeed(trackedAggregate);
        }
    }
}
/// <summary>
/// Writes a large event batch to the stream in WritePageSize pages inside a
/// single transaction so the whole batch commits atomically.
/// </summary>
/// <remarks>
/// Fixed: the transaction was never disposed; it is now wrapped in <c>using</c>
/// so it is released even when WriteAsync/CommitAsync throws.
/// </remarks>
private async Task WriteEventsInPages(string streamName, int expectedStreamVersion, List<EventData> eventsToSave)
{
    using (var transaction = await connection.StartTransactionAsync(streamName, expectedStreamVersion))
    {
        var position = 0;
        while (position < eventsToSave.Count)
        {
            var pageEvents = eventsToSave.Skip(position).Take(WritePageSize);
            await transaction.WriteAsync(pageEvents);
            position += WritePageSize;
        }
        await transaction.CommitAsync();
    }
}
/// <summary>
/// Synchronously persists an aggregate's uncommitted events (paging them
/// through a transaction for large batches), publishes them on the out-bus,
/// and clears the aggregate.
/// </summary>
/// <remarks>
/// Fixed: the paging transaction was never disposed; it is now wrapped in
/// <c>using</c>. The method is intentionally synchronous and blocks on the
/// async client calls via Wait()/Result.
/// </remarks>
public void Save(IAggregate aggregate, Guid commitId, Action<IDictionary<string, object>> updateHeaders)
{
    var commitHeaders = new Dictionary<string, object>
    {
        { CommitIdHeader, commitId },
        { AggregateClrTypeHeader, aggregate.GetType().AssemblyQualifiedName }
    };
    // Let the caller enrich the metadata headers before serialization.
    updateHeaders(commitHeaders);

    var streamName = _aggregateIdToStreamName(aggregate.GetType(), aggregate.Id);
    var newEvents = aggregate.GetUncommittedEvents().Cast<object>().ToList();

    // Expected stream version for optimistic concurrency.
    var originalVersion = aggregate.Version - newEvents.Count;
    var expectedVersion = originalVersion == 0 ? ExpectedVersion.NoStream : originalVersion - 1;

    var eventsToSave = newEvents.Select(e => ToEventData(Guid.NewGuid(), e, commitHeaders)).ToList();

    if (eventsToSave.Count < WritePageSize)
    {
        _eventStoreConnection.AppendToStreamAsync(streamName, expectedVersion, eventsToSave).Wait();
    }
    else
    {
        using (var transaction = _eventStoreConnection.StartTransactionAsync(streamName, expectedVersion).Result)
        {
            var position = 0;
            while (position < eventsToSave.Count)
            {
                var pageEvents = eventsToSave.Skip(position).Take(WritePageSize);
                transaction.WriteAsync(pageEvents).Wait();
                position += WritePageSize;
            }
            transaction.CommitAsync().Wait();
        }
    }

    if (_outBus != null)
    {
        foreach (var evt in newEvents)
        {
            try
            {
                _outBus.Publish((Message)evt);
            }
            catch
            {
                // Best-effort publish: persistence already succeeded.
                // TODO: decide whether failures here should be logged or retried.
            }
        }
    }

    aggregate.ClearUncommittedEvents();
}
/// <summary>
/// Persists the aggregate's uncommitted events (paging through a transaction
/// for large batches), publishes them on the bus, and marks them committed.
/// </summary>
/// <remarks>
/// Fixed: the paging transaction was never disposed; it is now wrapped in <c>using</c>.
/// </remarks>
public async Task SaveAsync<TAggregate>(TAggregate aggregate) where TAggregate : Aggregate
{
    var commitHeaders = new Dictionary<string, object>
    {
        { CommitIdHeader, aggregate.Id },
        { AggregateClrTypeHeader, aggregate.GetType().AssemblyQualifiedName }
    };

    var streamName = AggregateIdToStreamName(aggregate.GetType(), aggregate.Id);
    var eventsToPublish = aggregate.GetUncommittedEvents();
    var newEvents = eventsToPublish.Cast<object>().ToList();

    // originalVersion of -1 means the aggregate is brand new: the stream must not exist yet.
    var originalVersion = aggregate.Version - newEvents.Count;
    var expectedVersion = originalVersion == -1 ? ExpectedVersion.NoStream : originalVersion;

    var eventsToSave = newEvents.Select(e => ToEventData(Guid.NewGuid(), e, commitHeaders)).ToList();

    if (eventsToSave.Count < WritePageSize)
    {
        await eventStoreConnection.AppendToStreamAsync(streamName, expectedVersion, eventsToSave);
    }
    else
    {
        using (var transaction = await eventStoreConnection.StartTransactionAsync(streamName, expectedVersion))
        {
            var position = 0;
            while (position < eventsToSave.Count)
            {
                var pageEvents = eventsToSave.Skip(position).Take(WritePageSize);
                await transaction.WriteAsync(pageEvents);
                position += WritePageSize;
            }
            await transaction.CommitAsync();
        }
    }

    if (bus != null)
    {
        foreach (var e in eventsToPublish)
        {
            bus.Publish(e);
        }
    }

    aggregate.MarkEventsAsCommitted();
}
/// <summary>
/// Saves the aggregate's pending events to its stream, batching via a
/// transaction when needed, and returns the stream's next expected version.
/// </summary>
/// <remarks>
/// Fixed: the exception was passed as a message-template argument to LogError
/// (losing the exception and its stack trace in structured sinks); it is now
/// passed as the exception parameter with a structured message template.
/// </remarks>
public async Task<long> SaveAsync(AggregateBase aggregate, params KeyValuePair<string, string>[] extraHeaders)
{
    var streamName = aggregate.Identifier.ToString();
    var pendingEvents = aggregate.GetPendingEvents();
    var originalVersion = aggregate.Version - pendingEvents.Count;
    try
    {
        WriteResult result;
        var commitHeaders = CreateCommitHeaders(aggregate, extraHeaders);
        var eventsToSave = pendingEvents.Select(x => ToEventData(Guid.NewGuid(), x, commitHeaders));
        var eventBatches = GetEventBatches(eventsToSave);
        if (eventBatches.Count == 1)
        {
            result = await _eventStoreConnection.AppendToStreamAsync(streamName, originalVersion, eventBatches[0]);
        }
        else
        {
            // If we have more events to save than can be done in one batch according to
            // the WritePageSize, save them in a transaction to ensure atomicity.
            using (var transaction = await _eventStoreConnection.StartTransactionAsync(streamName, originalVersion))
            {
                foreach (var batch in eventBatches)
                {
                    await transaction.WriteAsync(batch);
                }
                result = await transaction.CommitAsync();
            }
        }
        aggregate.ClearPendingEvents();
        return result.NextExpectedVersion;
    }
    catch (Exception ex)
    {
        // Exception first so the logger records it (and its stack trace).
        _logger.LogError(ex, "Failed to write events for stream: {StreamName}.", streamName);
        ExceptionDispatchInfo.Capture(ex).Throw();
    }
    // Unreachable: Throw() above always rethrows; required for definite return.
    return originalVersion + 1;
}
// Seeds the per-test stream with three events, then verifies that committing
// an empty transaction leaves the next expected version at 2.
public void SetUp()
{
    _firstEvent = TestEvent.NewTestEvent();
    _connection = BuildConnection();
    _connection.ConnectAsync().Wait();
    _stream = TestContext.CurrentContext.Test.FullName;

    var appendResult = _connection.AppendToStreamAsync(
        _stream,
        ExpectedVersion.NoStream,
        _firstEvent,
        TestEvent.NewTestEvent(),
        TestEvent.NewTestEvent()).Result;
    Assert.AreEqual(2, appendResult.NextExpectedVersion);

    using (var transaction = _connection.StartTransactionAsync(_stream, 2).Result)
    {
        Assert.AreEqual(2, transaction.CommitAsync().Result.NextExpectedVersion);
    }
}
/// <summary>
/// Synchronously persists uncommitted domain events for an aggregate and then
/// publishes each of them.
/// </summary>
/// <remarks>
/// NOTE(review): the stream is written with <c>ExpectedVersion.Any</c>, so the
/// <paramref name="expectedRevision"/> argument is NOT used for optimistic
/// concurrency — the previously computed check was immediately overwritten
/// (dead code) and has been removed; runtime behavior is unchanged. Confirm
/// whether the check should be re-enabled.
/// Fixed: the paging transaction is now disposed via <c>using</c>.
/// </remarks>
public void SaveEvents(AggregateKey key, int expectedRevision, IEnumerable<DomainEvent> uncommittedEvents)
{
    var commitHeaders = new Dictionary<string, object>
    {
        { CommitIdHeader, Guid.NewGuid() },
        { AggregateClrTypeHeader, key.AggregateType.AssemblyQualifiedName }
    };

    var streamName = _aggregateIdToStreamName(key.AggregateType, key.AggregateId);
    var newEvents = uncommittedEvents.Cast<object>().ToList();

    // Concurrency checking is currently disabled (see remarks).
    var expectedVersion = ExpectedVersion.Any;

    var preparedEvents = PrepareEvents(newEvents, commitHeaders).ToList();

    if (preparedEvents.Count < WritePageSize)
    {
        _eventStoreConnection
            .AppendToStreamAsync(streamName, expectedVersion, preparedEvents)
            .Wait();
    }
    else
    {
        using (var transaction = _eventStoreConnection
            .StartTransactionAsync(streamName, expectedVersion)
            .Result)
        {
            var position = 0;
            while (position < preparedEvents.Count)
            {
                var pageEvents = preparedEvents.Skip(position).Take(WritePageSize);
                transaction.WriteAsync(pageEvents).Wait();
                position += WritePageSize;
            }
            transaction.CommitAsync().Wait();
        }
    }

    foreach (var @event in uncommittedEvents)
    {
        _publisher.Publish(@event);
    }
}
/// <summary>
/// Persists an aggregate's uncommitted events, using the aggregate's version
/// for optimistic concurrency and paging through a transaction for large batches.
/// </summary>
/// <remarks>
/// Taken from https://github.com/pgermishuys/getting-started-with-event-store/blob/master/src/GetEventStoreRepository/GetEventStoreRepository.cs
/// Fixed: the single-append path passed <c>ExpectedVersion.Any</c>, silently
/// bypassing the concurrency check that the transaction path enforced — both
/// paths now use the computed expectedVersion. Also materialized eventsToSave
/// once instead of re-enumerating (and re-serializing) via Count()/Skip()/Take(),
/// and the transaction is now disposed via <c>using</c>.
/// </remarks>
protected override void SaveAggregate(Aggregate aggregate, Guid transactionId)
{
    var commitHeaders = new Dictionary<string, object>
    {
        { CommitIdHeader, transactionId },
        { AggregateClrTypeHeader, aggregate.GetType().AssemblyQualifiedName }
    };

    IList<Event> newEvents = aggregate.GetUncommittedEvents();
    int originalVersion = aggregate.Version - newEvents.Count;
    int expectedVersion = originalVersion;
    // http://stackoverflow.com/a/20204729 — a brand-new stream must use NoStream.
    if (originalVersion == 0)
    {
        expectedVersion = ExpectedVersion.NoStream;
    }

    List<EventData> eventsToSave = newEvents.Select(e => ToEventData(Guid.NewGuid(), e, commitHeaders)).ToList();
    string streamName = GetStreamName(aggregate.GetType(), aggregate.Id);

    if (eventsToSave.Count < WritePageSize)
    {
        connection.AppendToStreamAsync(streamName, expectedVersion, eventsToSave).Wait();
    }
    else
    {
        using (var transaction = connection.StartTransactionAsync(streamName, expectedVersion).Result)
        {
            var position = 0;
            while (position < eventsToSave.Count)
            {
                var pageEvents = eventsToSave.Skip(position).Take(WritePageSize);
                transaction.WriteAsync(pageEvents).Wait();
                position += WritePageSize;
            }
            transaction.CommitAsync().Wait();
        }
    }

    aggregate.ClearUncommittedEvents();
}
// Saves the aggregate's uncommitted events to its stream, batching the writes
// in one transaction when more than one batch is required, and returns the
// stream's next expected version.
public async Task<long> SaveAsync(Aggregate aggregate, params KeyValuePair<string, string>[] extraHeaders)
{
    var streamName = aggregate.Identifier.ToString();
    var uncommittedEvents = aggregate.GetUncommittedEvents();
    var originalVersion = aggregate.Version - uncommittedEvents.Count;

    try
    {
        var commitHeaders = CreateCommitHeaders(aggregate, extraHeaders);
        var eventData = uncommittedEvents.Select(evt => ToEventData(Guid.NewGuid(), evt, commitHeaders));
        var batches = GetEventBatches(eventData);

        WriteResult writeResult;
        if (batches.Count == 1)
        {
            // One batch: a single append call suffices.
            writeResult = await _eventStoreConnection.AppendToStreamAsync(streamName, originalVersion, batches[0]);
        }
        else
        {
            // Several batches: wrap them in one transaction for atomicity.
            using (var transaction = await _eventStoreConnection.StartTransactionAsync(streamName, originalVersion))
            {
                foreach (var batch in batches)
                {
                    await transaction.WriteAsync(batch);
                }
                writeResult = await transaction.CommitAsync();
            }
        }

        aggregate.ClearUncommittedEvents();
        return writeResult.NextExpectedVersion;
    }
    catch (Exception ex)
    {
        // Rethrow preserving the original stack trace.
        ExceptionDispatchInfo.Capture(ex).Throw();
    }

    // Unreachable (Throw() above always throws) but required by the compiler.
    return originalVersion + 1;
}
// Boots a MiniNode, seeds "test-stream" with three events, and verifies that
// committing an empty transaction leaves the next expected version at 2.
public override void SetUp()
{
    base.SetUp();
    _node = new MiniNode(PathName);
    _node.Start();

    _firstEvent = TestEvent.NewTestEvent();
    _connection = BuildConnection(_node);
    _connection.ConnectAsync().Wait();

    var appendResult = _connection.AppendToStreamAsync(
        "test-stream",
        ExpectedVersion.NoStream,
        _firstEvent,
        TestEvent.NewTestEvent(),
        TestEvent.NewTestEvent()).Result;
    Assert.AreEqual(2, appendResult.NextExpectedVersion);

    using (var transaction = _connection.StartTransactionAsync("test-stream", 2).Result)
    {
        Assert.AreEqual(2, transaction.CommitAsync().Result.NextExpectedVersion);
    }
}
// Boots a MiniNode, connects via TestConnection, seeds "test-stream" with
// three events, and verifies a committed empty transaction (legacy sync
// Commit()) leaves the next expected version at 2.
public override void SetUp()
{
    base.SetUp();
    _node = new MiniNode(PathName);
    _node.Start();

    _firstEvent = TestEvent.NewTestEvent();
    _connection = TestConnection.Create(_node.TcpEndPoint);
    _connection.ConnectAsync().Wait();

    var appendResult = _connection.AppendToStreamAsync(
        "test-stream",
        ExpectedVersion.NoStream,
        _firstEvent,
        TestEvent.NewTestEvent(),
        TestEvent.NewTestEvent()).Result;
    Assert.AreEqual(2, appendResult.NextExpectedVersion);

    using (var transaction = _connection.StartTransactionAsync("test-stream", 2).Result)
    {
        Assert.AreEqual(2, transaction.Commit().NextExpectedVersion);
    }
}
/// <summary>
/// Saves a batch of events to the stream at the given expected version, using a
/// single transaction when the events span multiple write batches.
/// </summary>
/// <remarks>
/// Fixed: the previous implementation committed EVERY batch in its own
/// append/transaction using the same <paramref name="expectedVersion"/>; after
/// the first commit that version is stale, so any multi-batch write would fail
/// the concurrency check — and the batches were not atomic. All batches are now
/// written inside one transaction. The unused WriteResult local was removed.
/// </remarks>
protected async Task SaveEventBatch(string? streamId, long expectedVersion, IEnumerable<EventData> eventsToSave)
{
    var eventBatches = GetEventBatches(eventsToSave);
    if (eventBatches.Count == 0)
    {
        return;
    }

    if (eventBatches.Count == 1)
    {
        // Single batch: one append round-trip is sufficient.
        await _eventStoreConnection.AppendToStreamAsync(streamId, expectedVersion, eventBatches[0]);
        return;
    }

    // Multiple batches: one transaction so the whole write is atomic and the
    // expected version is checked exactly once.
    using var transaction = await _eventStoreConnection.StartTransactionAsync(streamId, expectedVersion);
    foreach (var batch in eventBatches)
    {
        await transaction.WriteAsync(batch);
    }
    await transaction.CommitAsync();
}
// Appends the serialized events to the stream, paging the writes through a
// transaction when the batch exceeds WritePageSize, and translating the
// store's concurrency failure into the domain's version exception.
private async Task AppendEventsInternalAsync(string streamName, long expectedVersion, ICollection<EventData> events)
{
    Guard.NotNullOrEmpty(streamName);
    Guard.NotNull(events);

    using (Profiler.TraceMethod<GetEventStore>(nameof(AppendAsync)))
    {
        // No-op for an empty batch.
        if (events.Count == 0)
        {
            return;
        }

        try
        {
            var serialized = events.Select(x => Formatter.Write(x, serializer)).ToList();

            if (serialized.Count < WritePageSize)
            {
                // Small batch: one append round-trip.
                await connection.AppendToStreamAsync(GetStreamName(streamName), expectedVersion, serialized);
            }
            else
            {
                // Large batch: page inside one transaction so the write is atomic.
                using (var transaction = await connection.StartTransactionAsync(GetStreamName(streamName), expectedVersion))
                {
                    var offset = 0;
                    while (offset < serialized.Count)
                    {
                        await transaction.WriteAsync(serialized.Skip(offset).Take(WritePageSize));
                        offset += WritePageSize;
                    }
                    await transaction.CommitAsync();
                }
            }
        }
        catch (WrongExpectedVersionException ex)
        {
            // Surface the actual stream version parsed from the store's message.
            throw new WrongEventVersionException(ParseVersion(ex.Message), expectedVersion);
        }
    }
}
/// <summary>
/// Persists an aggregate's uncommitted events with commit metadata headers,
/// paging through a transaction for large batches.
/// </summary>
/// <remarks>
/// Fixed: the method is async but blocked on <c>AppendToStreamAsync(...).Wait()</c>
/// and <c>StartTransactionAsync(...).Result</c> (deadlock / thread-starvation
/// risk); those calls are now awaited. The transaction is disposed via
/// <c>using</c> so it is released even when WriteAsync/CommitAsync throws.
/// </remarks>
public async Task Save(IAggregate aggregate, Guid commitId, Action<IDictionary<string, object>> updateHeaders)
{
    var commitHeaders = new Dictionary<string, object>
    {
        { CommitIdHeader, commitId },
        { CommitDateHeader, DateTime.UtcNow },
        { AggregateClrTypeHeader, aggregate.GetType().AssemblyQualifiedName }
    };
    // Let the caller enrich the metadata headers before serialization.
    updateHeaders(commitHeaders);

    var streamName = aggregateIdToStreamName(aggregate.GetType(), aggregate.Id.Value);
    var newEvents = aggregate.GetUncommittedEvents().Cast<object>().ToList();

    // Expected stream version for optimistic concurrency.
    var originalVersion = aggregate.Version - newEvents.Count;
    var expectedVersion = originalVersion == 0 ? ExpectedVersion.NoStream : originalVersion - 1;

    var eventsToSave = newEvents.Select(e => ToEventData(Guid.NewGuid(), e, commitHeaders)).ToList();

    if (eventsToSave.Count < WritePageSize)
    {
        await eventStoreConnection.AppendToStreamAsync(streamName, expectedVersion, eventsToSave);
    }
    else
    {
        using (var transaction = await eventStoreConnection.StartTransactionAsync(streamName, expectedVersion))
        {
            var position = 0;
            while (position < eventsToSave.Count)
            {
                var pageEvents = eventsToSave.Skip(position).Take(WritePageSize);
                await transaction.WriteAsync(pageEvents);
                position += WritePageSize;
            }
            await transaction.CommitAsync();
        }
    }

    aggregate.ClearUncommittedEvents();
}
// Starts a MiniNode, seeds "test-stream" with three events, then verifies a
// committed empty transaction leaves the next expected version at 2.
public override async Task SetUp()
{
    await base.SetUp();
    _node = new MiniNode<TLogFormat, TStreamId>(PathName);
    await _node.Start();

    _firstEvent = TestEvent.NewTestEvent();
    _connection = BuildConnection(_node);
    await _connection.ConnectAsync();

    var appendResult = await _connection.AppendToStreamAsync(
        "test-stream",
        ExpectedVersion.NoStream,
        _firstEvent,
        TestEvent.NewTestEvent(),
        TestEvent.NewTestEvent());
    Assert.AreEqual(2, appendResult.NextExpectedVersion);

    using (var transaction = await _connection.StartTransactionAsync("test-stream", 2))
    {
        var commitResult = await transaction.CommitAsync();
        Assert.AreEqual(2, commitResult.NextExpectedVersion);
    }
}
// Persists the aggregate's pending events, optionally bypassing the optimistic
// concurrency check, and returns the stream's next expected version.
public async Task<int> Save(IAggregate aggregate, bool concurrencyCheck = true)
{
    var streamName = StreamName($"{aggregate.GetType().Name}-{aggregate.Id}");
    var pendingEvents = aggregate.GetPendingEvents();

    // When the concurrency check is disabled, any stream version is accepted.
    var originalVersion = concurrencyCheck
        ? aggregate.Version - pendingEvents.Count
        : ExpectedVersion.Any;

    var commitHeaders = CreateCommitHeaders(aggregate);
    var batches = GetEventBatches(pendingEvents.Select(evt => ToEventData(Guid.NewGuid(), evt, commitHeaders)));

    WriteResult result;
    if (batches.Count == 1)
    {
        // If just one batch, write it straight to the Event Store.
        result = await _eventStoreConnection.AppendToStreamAsync(streamName, originalVersion, batches[0]);
    }
    else
    {
        // More events than fit one WritePageSize batch: use a transaction for atomicity.
        using (var transaction = await _eventStoreConnection.StartTransactionAsync(streamName, originalVersion))
        {
            foreach (var batch in batches)
            {
                await transaction.WriteAsync(batch);
            }
            result = await transaction.CommitAsync();
        }
    }

    aggregate.ClearPendingEvents();
    return (int)result.NextExpectedVersion;
}
/// <summary>
/// Persists the event inside a transaction and notifies the publisher, rolling
/// the transaction back if either step fails.
/// </summary>
/// <remarks>
/// Fixed: the event was previously appended with a separate
/// <c>AppendToStreamAsync</c> call (at yet another expected version) while the
/// transaction was open, and the transaction itself was committed EMPTY — the
/// write now goes through the transaction so commit/rollback actually covers it.
/// NOTE(review): the event is published before the commit succeeds, and any
/// failure is swallowed after rollback (preserving previous behavior) — confirm
/// both are intended.
/// </remarks>
private async Task SaveAndNotify(CancellationToken cancellationToken, IEvent @event)
{
    // Expected version of the stream before this event is appended.
    var expectedVersion = @event.Version - 1;

    var transaction = await _connection.StartTransactionAsync(@event.Id.ToString(), expectedVersion);
    try
    {
        await transaction.WriteAsync(CreateEventData(@event));
        await _publisher.Publish(@event, cancellationToken);
        await transaction.CommitAsync();
    }
    catch (Exception)
    {
        transaction.Rollback();
    }
    finally
    {
        transaction.Dispose();
    }
}
/// <summary>
/// Persists the aggregate's uncommitted changes, paging them through a
/// transaction when the batch exceeds WritePageSize, then marks them committed.
/// </summary>
/// <remarks>
/// Fixed: the paging transaction was never disposed; it is now wrapped in <c>using</c>.
/// </remarks>
public async Task Save(T aggregate)
{
    var commitHeaders = new Dictionary<string, object>
    {
        //{CommitIdHeader, aggregate.Id},
        { AggregateClrTypeHeader, aggregate.GetType().AssemblyQualifiedName }
    };

    var streamName = AggregateIdToStreamName(aggregate.GetType(), aggregate.Id);
    var newEvents = aggregate.GetUncommittedChanges().Cast<object>().ToList();

    // A negative original version means the stream does not exist yet.
    var originalVersion = aggregate.Version - newEvents.Count;
    var expectedVersion = originalVersion < 0 ? ExpectedVersion.NoStream : originalVersion;

    var preparedEvents = newEvents.Select(e => EventSerializer.Create(Guid.NewGuid(), e, commitHeaders)).ToList();

    if (preparedEvents.Count < WritePageSize)
    {
        await _eventStoreConnection.AppendToStreamAsync(streamName, expectedVersion, preparedEvents);
    }
    else
    {
        using (var transaction = await _eventStoreConnection.StartTransactionAsync(streamName, expectedVersion))
        {
            var position = 0;
            while (position < preparedEvents.Count)
            {
                var pageEvents = preparedEvents.Skip(position).Take(WritePageSize);
                await transaction.WriteAsync(pageEvents);
                position += WritePageSize;
            }
            await transaction.CommitAsync();
        }
    }

    aggregate.MarkChangesAsCommitted();
}
/// <summary>
/// Synchronously starts an EventStore transaction on this stream and wraps it.
/// </summary>
/// <remarks>
/// Fixed: uses <c>GetAwaiter().GetResult()</c> instead of <c>.Result</c> so a
/// failure surfaces as the original exception rather than an AggregateException.
/// </remarks>
public OngoingTransaction StartTransaction(int expectedVersion)
{
    var transaction = _store.StartTransactionAsync(_stream, expectedVersion).GetAwaiter().GetResult();
    return new OngoingTransaction(transaction);
}
/// <summary>
/// Test helper: synchronously starts a transaction on the stream with
/// <c>ExpectedVersion.Any</c>, optionally authenticating.
/// </summary>
/// <remarks>
/// Credentials are omitted only when BOTH login and password are null
/// (preserving the original behavior for a half-supplied pair).
/// Fixed: uses <c>GetAwaiter().GetResult()</c> instead of <c>.Result</c> so a
/// failure surfaces as the original exception rather than an AggregateException.
/// </remarks>
protected EventStoreTransaction TransStart(string streamId, string login, string password)
{
    var credentials = login == null && password == null
        ? null
        : new UserCredentials(login, password);
    return Connection.StartTransactionAsync(streamId, ExpectedVersion.Any, credentials)
                     .GetAwaiter()
                     .GetResult();
}
// Asynchronously opens an EventStore transaction on this stream and wraps it
// in an OngoingTransaction.
public async Task<OngoingTransaction> StartTransaction(long expectedVersion)
{
    var transaction = await _store.StartTransactionAsync(_stream, expectedVersion);
    return new OngoingTransaction(transaction);
}
/// <summary>
/// Saves the aggregate's pending events with verbose logging, batching via a
/// transaction when needed, and returns the stream's next expected version.
/// </summary>
/// <remarks>
/// Fixed: the failure log passed the exception as a message-template argument
/// to <c>Log.Error</c> (so Serilog lost the exception and its stack trace); it
/// is now passed as the exception parameter with a structured template.
/// </remarks>
public async Task<int> SaveAsync(AggregateBase aggregate, params KeyValuePair<string, string>[] extraHeaders)
{
    var streamName = aggregate.Identifier.ToString();
    if (Log.IsEnabled(LogEventLevel.Information))
    {
        Log.Information("Saving aggregate {streamName}", streamName);
    }

    var pendingEvents = aggregate.GetPendingEvents();
    var originalVersion = aggregate.Version - pendingEvents.Count;
    try
    {
        WriteResult result;
        var commitHeaders = CreateCommitHeaders(aggregate, extraHeaders);
        var eventsToSave = pendingEvents.Select(x => ToEventData(Guid.NewGuid(), x, commitHeaders));

        if (Log.IsEnabled(LogEventLevel.Information))
        {
            Log.Information("{pendingEventsCount} events to write to stream {streamName}...", pendingEvents.Count, streamName);
        }
        if (Log.IsEnabled(LogEventLevel.Debug))
        {
            foreach (var evt in pendingEvents)
            {
                // Take the hit of serializing twice here as debug logging should only be on in exceptional circumstances
                Log.Debug("Event Type: {eventType}. Payload: {payload}", evt.GetType().Name, JsonConvert.SerializeObject(evt));
            }
        }

        var eventBatches = GetEventBatches(eventsToSave);
        Debug.Assert(eventBatches.Count > 0);
        if (eventBatches.Count == 1)
        {
            // If just one batch write them straight to the Event Store
            result = await _eventStoreConnection.AppendToStreamAsync(streamName, originalVersion, eventBatches[0]);
        }
        else
        {
            // If we have more events to save than can be done in one batch according to the
            // WritePageSize, then we need to save them in a transaction to ensure atomicity
            using (var transaction = await _eventStoreConnection.StartTransactionAsync(streamName, originalVersion))
            {
                if (Log.IsEnabled(LogEventLevel.Information))
                {
                    Log.Information("Started transaction {transactionId} for stream {streamName}", transaction.TransactionId, streamName);
                }
                foreach (var batch in eventBatches)
                {
                    await transaction.WriteAsync(batch);
                }
                result = await transaction.CommitAsync();
                if (Log.IsEnabled(LogEventLevel.Information))
                {
                    Log.Information("Transaction {transactionId} committed", transaction.TransactionId);
                }
            }
        }

        aggregate.ClearPendingEvents();
        if (Log.IsEnabled(LogEventLevel.Information))
        {
            Log.Information("Aggregate {streamName} pending events cleaned up", streamName);
        }
        return result.NextExpectedVersion;
    }
    catch (Exception ex)
    {
        // Exception first so Serilog captures it and its stack trace.
        Log.Error(ex, "Failed to write events for stream: {StreamName}.", streamName);
        ExceptionDispatchInfo.Capture(ex).Throw();
    }
    // Unreachable: Throw() above always rethrows; required for definite return.
    return originalVersion + 1;
}
// Starts an EventStore transaction for the identifier and wraps it so callers
// depend on IWrappedTransaction instead of the client's transaction type.
private static async Task<IWrappedTransaction> CreateTransaction(IEventStoreConnection connection, string identifier, long expectedVersion)
{
    var rawTransaction = await connection
        .StartTransactionAsync(identifier, expectedVersion)
        .ConfigureAwait(false);
    return new WrappedTransaction(rawTransaction);
}