/// <summary>
/// Applies the events of a single slice to its aggregate document and queues
/// the resulting persistence operation (upsert or delete) on the session.
/// </summary>
/// <param name="session">The active document session to queue operations on.</param>
/// <param name="slice">The tenant-scoped slice of events for one aggregate identity.</param>
/// <param name="cancellation">Cancellation token flowed to storage calls.</param>
/// <param name="lifecycle">Projection lifecycle; Inline triggers loading the persisted aggregate.</param>
public async ValueTask ApplyChangesAsync(DocumentSessionBase session, EventSlice<TDoc, TId> slice,
    CancellationToken cancellation, ProjectionLifecycle lifecycle = ProjectionLifecycle.Inline)
{
    // A matching delete-type event short-circuits all other processing
    if (Projection.MatchesAnyDeleteType(slice))
    {
        session.QueueOperation(Storage.DeleteForId(slice.Id, slice.Tenant.TenantId));
        return;
    }

    var snapshot = slice.Aggregate;
    if (snapshot == null && lifecycle == ProjectionLifecycle.Inline)
    {
        // Inline projections must start from the currently persisted aggregate, if any
        snapshot = await Storage.LoadAsync(slice.Id, session, cancellation).ConfigureAwait(false);
    }

    // Capture existence *before* any events are applied; used below to decide
    // whether a null result means "delete" or "nothing to do"
    var existedBefore = snapshot != null;

    foreach (var @event in slice.Events())
    {
        try
        {
            snapshot = await ApplyEvent(session, slice, @event, snapshot, cancellation).ConfigureAwait(false);
        }
        catch (Exception e) when (!(e is MartenCommandException) && !(e is NpgsqlException))
        {
            // Database-level exceptions propagate untouched (filter above excludes them);
            // everything else is wrapped with the offending event for diagnostics
            throw new ApplyEventException(@event, e);
        }
    }

    if (snapshot == null)
    {
        // Events reduced the aggregate to nothing — delete it only if it
        // actually existed before this slice was processed
        if (existedBefore)
        {
            session.QueueOperation(Storage.DeleteForId(slice.Id, slice.Tenant.TenantId));
        }

        return;
    }

    Storage.SetIdentity(snapshot, slice.Id);
    session.QueueOperation(Storage.Upsert(snapshot, session, slice.Tenant.TenantId));
}
/// <summary>
/// Flushes all pending event streams tracked by the session: reserves sequence
/// numbers, queues stream insert/update and event append operations, then runs
/// every registered inline projection against the captured streams.
/// </summary>
/// <param name="session">The session whose work tracker holds pending streams.</param>
/// <param name="token">Cancellation token flowed to all database calls.</param>
internal async Task ProcessEventsAsync(DocumentSessionBase session, CancellationToken token)
{
    // Fix: use the public WorkTracker property for consistency with the rest of
    // this method rather than reaching into the private _workTracker field
    if (!session.WorkTracker.Streams.Any())
    {
        return;
    }

    if (Options.AutoCreateSchemaObjects != AutoCreate.None)
    {
        await session.Database.EnsureStorageExistsAsync(typeof(IEvent), token).ConfigureAwait(false);
    }

    // Reserve one sequence number per pending event in a single round trip
    var fetcher = new EventSequenceFetcher(this, session.WorkTracker.Streams.Sum(x => x.Events.Count));
    var sequences = await session.ExecuteHandlerAsync(fetcher, token).ConfigureAwait(false);

    var storage = session.EventStorage();

    foreach (var stream in session.WorkTracker.Streams.Where(x => x.Events.Any()))
    {
        stream.TenantId ??= session.TenantId;

        if (stream.ActionType == StreamActionType.Start)
        {
            stream.PrepareEvents(0, this, sequences, session);
            session.QueueOperation(storage.InsertStream(stream));
        }
        else
        {
            // Appending: load the current stream state so new events are versioned correctly
            var handler = storage.QueryForStream(stream);
            var state = await session.ExecuteHandlerAsync(handler, token).ConfigureAwait(false);

            if (state == null)
            {
                // The stream does not exist yet, so treat this as a brand new stream
                stream.PrepareEvents(0, this, sequences, session);
                session.QueueOperation(storage.InsertStream(stream));
            }
            else
            {
                if (state.IsArchived)
                {
                    throw new InvalidStreamOperationException(
                        $"Attempted to append event to archived stream with Id '{state.Id}'.");
                }

                stream.PrepareEvents(state.Version, this, sequences, session);
                session.QueueOperation(storage.UpdateStreamVersion(stream));
            }
        }

        foreach (var @event in stream.Events)
        {
            session.QueueOperation(storage.AppendEvent(this, session, stream, @event));
        }
    }

    // Run all registered inline projections against the captured streams
    foreach (var projection in _inlineProjections.Value)
    {
        await projection.ApplyAsync(session, session.WorkTracker.Streams.ToList(), token).ConfigureAwait(false);
    }
}
/// <summary>
/// Synchronous twin of ProcessEventsAsync: flushes all pending event streams
/// tracked by the session — reserves sequence numbers, queues stream
/// insert/update and event append operations, then runs every registered
/// inline projection against the captured streams.
/// </summary>
/// <param name="session">The session whose work tracker holds pending streams.</param>
internal void ProcessEvents(DocumentSessionBase session)
{
    if (!session.WorkTracker.Streams.Any())
    {
        return;
    }

    if (Options.AutoCreateSchemaObjects != AutoCreate.None)
    {
        session.Database.EnsureStorageExists(typeof(IEvent));
    }

    var storage = session.EventStorage();

    // Reserve one sequence number per pending event in a single round trip
    var fetcher = new EventSequenceFetcher(this, session.WorkTracker.Streams.Sum(x => x.Events.Count));
    var sequences = session.ExecuteHandler(fetcher);

    foreach (var stream in session.WorkTracker.Streams)
    {
        stream.TenantId ??= session.TenantId;

        if (stream.ActionType == StreamActionType.Start)
        {
            stream.PrepareEvents(0, this, sequences, session);
            session.QueueOperation(storage.InsertStream(stream));
        }
        else
        {
            // Appending: load the current stream state so new events are versioned correctly
            var handler = storage.QueryForStream(stream);
            var state = session.ExecuteHandler(handler);

            if (state == null)
            {
                // The stream does not exist yet, so treat this as a brand new stream
                stream.PrepareEvents(0, this, sequences, session);
                session.QueueOperation(storage.InsertStream(stream));
            }
            else
            {
                // Fix: parity with the async path — appending to an archived
                // stream must fail instead of silently succeeding
                if (state.IsArchived)
                {
                    throw new InvalidStreamOperationException(
                        $"Attempted to append event to archived stream with Id '{state.Id}'.");
                }

                stream.PrepareEvents(state.Version, this, sequences, session);
                session.QueueOperation(storage.UpdateStreamVersion(stream));
            }
        }

        foreach (var @event in stream.Events)
        {
            session.QueueOperation(storage.AppendEvent(this, session, stream, @event));
        }
    }

    // Run all registered inline projections against the captured streams
    foreach (var projection in _inlineProjections.Value)
    {
        projection.Apply(session, session.WorkTracker.Streams.ToList());
    }
}
/// <summary>
/// Flushes all pending event streams tracked by the session: reserves sequence
/// numbers, queues stream insert/update and event append operations, then runs
/// every registered inline projection against the captured streams.
/// </summary>
/// <param name="session">The session whose work tracker holds pending streams.</param>
/// <param name="token">Cancellation token flowed to all database calls.</param>
internal async Task ProcessEventsAsync(DocumentSessionBase session, CancellationToken token)
{
    // Fix: use the public WorkTracker property for consistency with the rest of
    // this method rather than reaching into the private _workTracker field
    if (!session.WorkTracker.Streams.Any())
    {
        return;
    }

    // TODO -- we'll optimize this later to batch up queries to the database
    var fetcher = new EventSequenceFetcher(this, session.WorkTracker.Streams.Sum(x => x.Events.Count));
    // Fix: library code should not capture the synchronization context — every
    // await below now uses ConfigureAwait(false)
    var sequences = await session.ExecuteHandlerAsync(fetcher, token).ConfigureAwait(false);

    var storage = session.EventStorage();

    foreach (var stream in session.WorkTracker.Streams)
    {
        stream.TenantId ??= session.Tenant.TenantId;

        if (stream.ActionType == StreamActionType.Start)
        {
            stream.PrepareEvents(0, this, sequences, session);
            session.QueueOperation(storage.InsertStream(stream));
        }
        else
        {
            // Appending: load the current stream state so new events are versioned correctly
            var handler = storage.QueryForStream(stream);
            var state = await session.ExecuteHandlerAsync(handler, token).ConfigureAwait(false);

            if (state == null)
            {
                // The stream does not exist yet, so treat this as a brand new stream
                stream.PrepareEvents(0, this, sequences, session);
                session.QueueOperation(storage.InsertStream(stream));
            }
            else
            {
                stream.PrepareEvents(state.Version, this, sequences, session);
                session.QueueOperation(storage.UpdateStreamVersion(stream));
            }
        }

        foreach (var @event in stream.Events)
        {
            session.QueueOperation(storage.AppendEvent(this, session, stream, @event));
        }
    }

    // Run all registered inline projections against the captured streams
    foreach (var projection in _inlineProjections.Value)
    {
        await projection.ApplyAsync(session, session.WorkTracker.Streams.ToList(), token).ConfigureAwait(false);
    }
}
/// <summary>
/// Builds and queues the patch operation: resolves the server-side patching
/// transform and document storage, derives the WHERE clause from either the
/// explicit filter or the captured LINQ expression, and enqueues the patch.
/// </summary>
private void apply()
{
    var patchTransform = _session.Tenant.TransformFor(StoreOptions.PatchDoc);
    var docStorage = _session.StorageFor(typeof(T));

    ISqlFragment where;
    if (_filter != null)
    {
        // An explicit ISqlFragment filter was supplied — use it directly
        where = docStorage.FilterDocuments(null, _filter);
    }
    else
    {
        // Otherwise compile the captured LINQ expression into a WHERE clause
        var statement = new StatementOperation(docStorage, null);
        statement.ApplyFiltering(_session, _filterExpression);
        where = statement.Where;
    }

    var operation = new PatchOperation(patchTransform, docStorage, where, Patch, _session.Serializer)
    {
        PossiblyPolymorhpic = PossiblyPolymorphic
    };

    _session.QueueOperation(operation);
}
/// <summary>
/// Builds and queues the patch operation: resolves the server-side patching
/// transform and document storage, scopes the patch with the supplied SQL
/// fragment, and enqueues the resulting operation on the session.
/// </summary>
private void apply()
{
    // Resolve the patching transform function and the storage for T
    var patchTransform = _session.Tenant.TransformFor(StoreOptions.PatchDoc);
    var docStorage = _session.StorageFor(typeof(T));

    // Limit the patch to documents matching the supplied filter fragment
    var whereClause = docStorage.FilterDocuments(null, _fragment);

    _session.QueueOperation(
        new PatchOperation(patchTransform, docStorage.QueryableDocument, whereClause, Patch, _session.Serializer));
}
/// <summary>
/// Queues an operation that marks the identified event stream as archived;
/// the archive takes effect when the session's pending changes are saved.
/// </summary>
/// <param name="streamId">The identity of the event stream to archive.</param>
public void ArchiveStream(Guid streamId)
{
    _session.QueueOperation(new ArchiveStreamOperation(_store.Events, streamId));
}