/// <summary>
/// Builds the metadata headers attached to every commit written for the given aggregate:
/// a fresh commit id, the aggregate's CLR type, user/server identification, and the server
/// clock in round-trip ("o") format. Caller-supplied <paramref name="extraHeaders"/> are
/// merged in last, overwriting defaults on key collision.
/// </summary>
/// <param name="aggregate">Aggregate being committed; its runtime type is recorded in the headers.</param>
/// <param name="extraHeaders">Optional additional headers; a null array is treated as empty.</param>
/// <returns>A mutable dictionary of header key/value pairs for the commit.</returns>
private static IDictionary<string, string> CreateCommitHeaders(AggregateBase aggregate, KeyValuePair<string, string>[] extraHeaders)
{
    var commitId = Guid.NewGuid();
    var commitHeaders = new Dictionary<string, string>
    {
        { MetadataKeys.CommitIdHeader, commitId.ToString() },
        { MetadataKeys.AggregateClrTypeHeader, aggregate.GetType().AssemblyQualifiedName },
        // Thread.CurrentThread.Name is frequently null; coalesce so the header value is never null.
        { MetadataKeys.UserIdentityHeader, Thread.CurrentThread.Name ?? string.Empty }, // TODO - was Thread.CurrentPrincipal?.Identity?.Name
        { MetadataKeys.ServerNameHeader, "DefaultServerNameHeader" }, // TODO - was Environment.MachineName
        { MetadataKeys.ServerClockHeader, DateTime.UtcNow.ToString("o") }
    };

    // Guard against a null params array passed explicitly; extras win over the defaults above.
    foreach (var extraHeader in extraHeaders ?? Array.Empty<KeyValuePair<string, string>>())
    {
        commitHeaders[extraHeader.Key] = extraHeader.Value;
    }

    return commitHeaders;
}
/// <summary>
/// Persists the aggregate's pending events to the Event Store stream named by its identifier,
/// using optimistic concurrency against the version the aggregate had before the pending
/// events were raised. A single batch is appended directly; multiple batches are written
/// inside a transaction so the multi-batch write remains atomic. On success the aggregate's
/// pending events are cleared.
/// </summary>
/// <param name="aggregate">Aggregate whose pending events are to be saved.</param>
/// <param name="extraHeaders">Additional headers merged into the commit metadata.</param>
/// <returns>The next expected version of the stream after the write.</returns>
public async Task<int> SaveAsync(AggregateBase aggregate, params KeyValuePair<string, string>[] extraHeaders)
{
    var streamName = aggregate.Identifier.ToString();
    if (Log.IsEnabled(LogEventLevel.Information))
    {
        Log.Information("Saving aggregate {streamName}", streamName);
    }

    var pendingEvents = aggregate.GetPendingEvents();
    // Stream version before these events were raised — the expected version for optimistic concurrency.
    var originalVersion = aggregate.Version - pendingEvents.Count;

    try
    {
        WriteResult result;
        var commitHeaders = CreateCommitHeaders(aggregate, extraHeaders);
        // Deferred projection; enumerated once below by GetEventBatches.
        var eventsToSave = pendingEvents.Select(x => ToEventData(Guid.NewGuid(), x, commitHeaders));

        if (Log.IsEnabled(LogEventLevel.Information))
        {
            Log.Information("{pendingEventsCount} events to write to stream {streamName}...", pendingEvents.Count, streamName);
        }

        if (Log.IsEnabled(LogEventLevel.Debug))
        {
            foreach (var evt in pendingEvents)
            {
                // Take the hit of serializing twice here as debug logging should only be on in exceptional circumstances
                Log.Debug("Event Type: {eventType}. Payload: {payload}", evt.GetType().Name, JsonConvert.SerializeObject(evt));
            }
        }

        var eventBatches = GetEventBatches(eventsToSave);
        Debug.Assert(eventBatches.Count > 0);

        if (eventBatches.Count == 1)
        {
            // If just one batch, write it straight to the Event Store.
            result = await _eventStoreConnection.AppendToStreamAsync(streamName, originalVersion, eventBatches[0]);
        }
        else
        {
            // More events than fit in one batch (per WritePageSize): write them inside a
            // transaction to keep the multi-batch append atomic.
            using (var transaction = await _eventStoreConnection.StartTransactionAsync(streamName, originalVersion))
            {
                if (Log.IsEnabled(LogEventLevel.Information))
                {
                    Log.Information("Started transaction {transactionId} for stream {streamName}", transaction.TransactionId, streamName);
                }

                foreach (var batch in eventBatches)
                {
                    await transaction.WriteAsync(batch);
                }

                result = await transaction.CommitAsync();
                if (Log.IsEnabled(LogEventLevel.Information))
                {
                    Log.Information("Transaction {transactionId} committed", transaction.TransactionId);
                }
            }
        }

        aggregate.ClearPendingEvents();
        if (Log.IsEnabled(LogEventLevel.Information))
        {
            Log.Information("Aggregate {streamName} pending events cleaned up", streamName);
        }

        return result.NextExpectedVersion;
    }
    catch (Exception ex)
    {
        // Serilog fix: the exception must be the FIRST argument to be captured as the log
        // event's exception — the original passed it as an unused template property and used
        // string interpolation, which both lost the exception and defeated structured logging.
        Log.Error(ex, "Failed to write events for stream: {streamName}", streamName);
        // Plain rethrow preserves the original stack trace; the previous
        // ExceptionDispatchInfo.Capture(ex).Throw() plus dead return was unnecessary.
        throw;
    }
}