public async Task <IEventStream <TDoc> > FetchForWriting(DocumentSessionBase session, TId id, bool forUpdate, CancellationToken cancellation = default) { var selector = await _identityStrategy.EnsureAggregateStorageExists <TDoc>(session, cancellation).ConfigureAwait(false); if (forUpdate) { await session.BeginTransactionAsync(cancellation).ConfigureAwait(false); } var command = _identityStrategy.BuildCommandForReadingVersionForStream(id, forUpdate); var builder = new CommandBuilder(command); builder.Append(";"); var handler = _identityStrategy.BuildEventQueryHandler(id, selector); handler.ConfigureCommand(builder, session); long version = 0; try { using var reader = await session.ExecuteReaderAsync(builder.Compile(), cancellation).ConfigureAwait(false); if (await reader.ReadAsync(cancellation).ConfigureAwait(false)) { version = await reader.GetFieldValueAsync <long>(0, cancellation).ConfigureAwait(false); } await reader.NextResultAsync(cancellation).ConfigureAwait(false); var events = await handler.HandleAsync(reader, session, cancellation).ConfigureAwait(false); var document = await _aggregator.BuildAsync(events, session, default, cancellation).ConfigureAwait(false);
/// <summary>
/// Queues new events to be appended to an existing (or new) string-keyed stream.
/// </summary>
internal StreamAction Append(DocumentSessionBase session, string stream, params object[] events)
{
    EnsureAsStringStorage(session);

    if (stream.IsEmpty())
    {
        throw new ArgumentOutOfRangeException(nameof(stream), "The stream key cannot be null or empty");
    }

    // Wrap each raw payload and stamp it with the destination stream key
    var wrapped = events.Select(payload =>
    {
        var @event = BuildEvent(payload);
        @event.StreamKey = stream;
        return @event;
    }).ToArray();

    // Fold into an in-flight stream action if one is already tracked,
    // otherwise begin tracking a brand new append action
    if (!session.WorkTracker.TryFindStream(stream, out var eventStream))
    {
        eventStream = StreamAction.Append(stream, wrapped);
        session.WorkTracker.Streams.Add(eventStream);
    }
    else
    {
        eventStream.AddEvents(wrapped);
    }

    return eventStream;
}
/// <summary>
/// Applies a batch of event projections against a set of views: creates views for
/// CreateAndUpdate projections when missing, stores/updates projected views, and
/// deletes views whose delete-projection predicate passes.
/// </summary>
private async Task applyProjectionsAsync(DocumentSessionBase session, ICollection<EventProjection> projections, IEnumerable<TView> views)
{
    var idAssigner = session.Tenant.IdAssignmentFor<TView>();
    var resolver = session.Tenant.StorageFor<TView>();

    // Index the incoming views by identity for O(1) lookup per projection
    var viewMap = views.ToDictionary(view => (TId)resolver.IdentityFor(view), view => view);

    foreach (var eventProjection in projections)
    {
        var viewId = eventProjection.ViewId;
        var hasExistingView = viewMap.TryGetValue(viewId, out var view);

        // CreateAndUpdate projections materialize a fresh view on demand
        if (!hasExistingView && eventProjection.Type == ProjectionEventType.CreateAndUpdate)
        {
            view = newView(session.Tenant, idAssigner, viewId);
            viewMap.Add(viewId, view);
            hasExistingView = true;
        }

        if (eventProjection.Type == ProjectionEventType.CreateAndUpdate ||
            (eventProjection.Type == ProjectionEventType.UpdateOnly && hasExistingView))
        {
            session.Store(view);

            // BUG FIX: previously called .Wait() on the task inside an async method,
            // risking deadlocks under a synchronization context and wrapping failures
            // in AggregateException; await it properly instead.
            await eventProjection.ProjectTo(session, view).ConfigureAwait(false);
        }
        else if (eventProjection.Type == ProjectionEventType.Delete && hasExistingView &&
                 await eventProjection.ShouldDelete(session, view).ConfigureAwait(false))
        {
            session.Delete(view);
        }
    }
}
/// <summary>
/// Queues new events to be appended to an existing (or new) Guid-identified stream.
/// </summary>
internal StreamAction Append(DocumentSessionBase session, Guid stream, params object[] events)
{
    EnsureAsGuidStorage(session);

    if (stream == Guid.Empty)
    {
        throw new ArgumentOutOfRangeException(nameof(stream), "Cannot use an empty Guid as the stream id");
    }

    // Wrap each raw payload and stamp it with the destination stream id
    var wrapped = events.Select(payload =>
    {
        var built = BuildEvent(payload);
        built.StreamId = stream;
        return built;
    }).ToArray();

    if (!session.WorkTracker.TryFindStream(stream, out var eventStream))
    {
        eventStream = StreamAction.Append(stream, wrapped);
        session.WorkTracker.Streams.Add(eventStream);
    }
    else
    {
        eventStream.AddEvents(wrapped);
    }

    return eventStream;
}
/// <summary>
/// Determines the storage operation (upsert or delete) produced by applying the
/// slice's events to its aggregate. Returns null when there is nothing to do.
/// </summary>
public async Task<IStorageOperation> DetermineOperation(DocumentSessionBase session, EventSlice<TDoc, TId> slice, CancellationToken cancellation, ProjectionLifecycle lifecycle = ProjectionLifecycle.Inline)
{
    var aggregate = slice.Aggregate;

    // Inline lifecycle: the slice may not carry the aggregate, so load the stored snapshot
    if (slice.Aggregate == null && lifecycle == ProjectionLifecycle.Inline)
    {
        // FIX: library-internal awaits use ConfigureAwait(false) so no caller
        // synchronization context is captured (consistent with sibling overloads)
        aggregate = await Storage.LoadAsync(slice.Id, session, cancellation).ConfigureAwait(false);
    }

    // Did the aggregate exist before these events were applied?
    var exists = aggregate != null;

    foreach (var @event in slice.Events)
    {
        aggregate = await ApplyEvent(session, slice, @event, aggregate, cancellation).ConfigureAwait(false);
    }

    if (aggregate != null)
    {
        Storage.SetIdentity(aggregate, slice.Id);
    }

    // A null aggregate after applying events means delete-if-existed
    if (aggregate == null)
    {
        return exists ? Storage.DeleteForId(slice.Id, slice.Tenant) : null;
    }

    return Storage.Upsert(aggregate, session, slice.Tenant);
}
/// <summary>
/// Applies the slice's events to its aggregate and queues the resulting
/// upsert/delete operation on the session. Inline projections may first load
/// the currently stored aggregate snapshot.
/// </summary>
public async ValueTask ApplyChangesAsync(DocumentSessionBase session, EventSlice<TDoc, TId> slice, CancellationToken cancellation, ProjectionLifecycle lifecycle = ProjectionLifecycle.Inline)
{
    // Short circuit: any event matching a registered delete type removes the aggregate outright
    if (Projection.MatchesAnyDeleteType(slice))
    {
        var operation = Storage.DeleteForId(slice.Id, slice.Tenant.TenantId);
        session.QueueOperation(operation);
        return;
    }

    var aggregate = slice.Aggregate;

    // Inline lifecycle: the slice may not carry the aggregate, so load the stored snapshot
    if (slice.Aggregate == null && lifecycle == ProjectionLifecycle.Inline)
    {
        aggregate = await Storage.LoadAsync(slice.Id, session, cancellation).ConfigureAwait(false);
    }

    // Does the aggregate already exist before the events are applied?
    var exists = aggregate != null;

    foreach (var @event in slice.Events())
    {
        try
        {
            aggregate = await ApplyEvent(session, slice, @event, aggregate, cancellation).ConfigureAwait(false);
        }
        catch (MartenCommandException)
        {
            // Database-level failures propagate untouched
            throw;
        }
        catch (NpgsqlException)
        {
            throw;
        }
        catch (Exception e)
        {
            // Wrap projection failures so the offending event is identifiable
            throw new ApplyEventException(@event, e);
        }
    }

    if (aggregate != null)
    {
        Storage.SetIdentity(aggregate, slice.Id);
    }

    // Delete the aggregate *if* it existed prior to these events
    if (aggregate == null)
    {
        if (exists)
        {
            var operation = Storage.DeleteForId(slice.Id, slice.Tenant.TenantId);
            session.QueueOperation(operation);
        }

        return;
    }

    session.QueueOperation(Storage.Upsert(aggregate, session, slice.Tenant.TenantId));
}
/// <summary>
/// Custom aggregation: tallies per-letter counts from CustomEvent data
/// into the aggregate and stores the result on the session.
/// </summary>
public override ValueTask ApplyChangesAsync(DocumentSessionBase session, EventSlice<CustomAggregate, int> slice, CancellationToken cancellation, ProjectionLifecycle lifecycle = ProjectionLifecycle.Inline)
{
    // Reuse the sliced aggregate when present, otherwise start a fresh one for this id
    var aggregate = slice.Aggregate ?? new CustomAggregate { Id = slice.Id };

    foreach (var @event in slice.Events())
    {
        if (@event.Data is CustomEvent e)
        {
            if (e.Letter == 'a')
            {
                aggregate.ACount++;
            }
            else if (e.Letter == 'b')
            {
                aggregate.BCount++;
            }
            else if (e.Letter == 'c')
            {
                aggregate.CCount++;
            }
            else if (e.Letter == 'd')
            {
                aggregate.DCount++;
            }
        }
    }

    session.Store(aggregate);
    return new ValueTask();
}
/// <summary>
/// Event store facade scoped to a single document session, store, and tenant.
/// </summary>
public EventStore(DocumentSessionBase session, DocumentStore store, ITenant tenant)
{
    _store = store;
    _tenant = tenant;
    _session = session;
}
/// <summary>
/// Kicks off asynchronous processing of this tenant's event slices: each slice is
/// transformed into a storage operation on a dataflow pipeline that feeds the
/// shared operation queue.
/// </summary>
internal void Start(ActionBlock<IStorageOperation> queue, AggregationRuntime<TDoc, TId> runtime, IDocumentStore store, CancellationToken token)
{
    // Dedicated lightweight session for this tenant's work
    _session = (DocumentSessionBase)store.LightweightSession(Tenant.TenantId);

    // Transform each slice into its storage operation; exceptions are logged then rethrown
    _builder = new TransformBlock<EventSlice<TDoc, TId>, IStorageOperation>(slice =>
    {
        try
        {
            return (runtime.DetermineOperation(_session, slice, token));
        }
        catch (Exception e)
        {
            Debug.WriteLine(e);
            throw;
        }
    }, new ExecutionDataflowBlockOptions { CancellationToken = token });

    _builder.LinkTo(queue);

    _application = Task.Factory.StartNew(async () =>
    {
        var beingFetched = new List<EventSlice<TDoc, TId>>();

        // First pass: deletes go straight to the queue, brand new aggregates go
        // straight to the builder, everything else needs its snapshot fetched first
        foreach (var slice in Slices)
        {
            if (runtime.Projection.MatchesAnyDeleteType(slice))
            {
                var deletion = runtime.Storage.DeleteForId(slice.Id, Tenant);
                queue.Post(deletion);
            }
            else if (runtime.IsNew(slice))
            {
                _builder.Post(slice);
            }
            else
            {
                beingFetched.Add(slice);
            }
        }

        // Batch load the existing aggregates for the slices that need them
        var ids = beingFetched.Select(x => x.Id).ToArray();
        var aggregates = await runtime.Storage
            .LoadManyAsync(ids, _session, token);

        var dict = aggregates.ToDictionary(x => runtime.Storage.Identity(x));

        // NOTE(review): this second pass iterates ALL Slices, so slices already
        // posted above (deletes and new aggregates) appear to be posted to
        // _builder a second time — verify whether it should iterate beingFetched.
        foreach (var slice in Slices)
        {
            if (dict.TryGetValue(slice.Id, out var aggregate))
            {
                slice.Aggregate = aggregate;
            }

            _builder.Post(slice);
        }
    }, token);
}
/// <summary>
/// Persists all pending event streams on the session: reserves sequence numbers,
/// queues stream insert/update and event append operations, rejects appends to
/// archived streams, then runs inline projections.
/// </summary>
internal async Task ProcessEventsAsync(DocumentSessionBase session, CancellationToken token)
{
    if (!session._workTracker.Streams.Any())
    {
        return;
    }

    // Make sure the event storage exists when auto schema creation is enabled
    if (Options.AutoCreateSchemaObjects != AutoCreate.None)
    {
        await session.Database.EnsureStorageExistsAsync(typeof(IEvent), token).ConfigureAwait(false);
    }

    // Reserve one sequence number per pending event in a single round trip
    var fetcher = new EventSequenceFetcher(this, session.WorkTracker.Streams.Sum(x => x.Events.Count));
    var sequences = await session.ExecuteHandlerAsync(fetcher, token).ConfigureAwait(false);

    var storage = session.EventStorage();

    foreach (var stream in session.WorkTracker.Streams.Where(x => x.Events.Any()))
    {
        stream.TenantId ??= session.TenantId;

        if (stream.ActionType == StreamActionType.Start)
        {
            stream.PrepareEvents(0, this, sequences, session);
            session.QueueOperation(storage.InsertStream(stream));
        }
        else
        {
            // Appending: fetch current stream state for versioning and archival checks
            var handler = storage.QueryForStream(stream);
            var state = await session.ExecuteHandlerAsync(handler, token).ConfigureAwait(false);

            if (state == null)
            {
                // Stream does not exist yet; treat this append as a start
                stream.PrepareEvents(0, this, sequences, session);
                session.QueueOperation(storage.InsertStream(stream));
            }
            else
            {
                if (state.IsArchived)
                {
                    throw new InvalidStreamOperationException($"Attempted to append event to archived stream with Id '{state.Id}'.");
                }

                stream.PrepareEvents(state.Version, this, sequences, session);
                session.QueueOperation(storage.UpdateStreamVersion(stream));
            }
        }

        foreach (var @event in stream.Events)
        {
            session.QueueOperation(storage.AppendEvent(this, session, stream, @event));
        }
    }

    // Run all inline projections against the captured streams
    foreach (var projection in _inlineProjections.Value)
    {
        await projection.ApplyAsync(session, session.WorkTracker.Streams.ToList(), token).ConfigureAwait(false);
    }
}
/// <summary>
/// Ensures Guid-identified event storage exists for the supplied session and
/// returns the event storage selector.
/// </summary>
async Task<IEventStorage> IEventIdentityStrategy<Guid>.EnsureAggregateStorageExists<T>(DocumentSessionBase session, CancellationToken cancellation)
{
    // BUG FIX: previously passed the captured _session field instead of the
    // session argument, so storage could be validated against the wrong session.
    var selector = _store.Events.EnsureAsGuidStorage(session);

    await session.Database.EnsureStorageExistsAsync(typeof(IEvent), cancellation).ConfigureAwait(false);

    return selector;
}
/// <summary>
/// Explicit IEventIdentityStrategy&lt;string&gt; member: registers an append against
/// the string-keyed stream and wraps it in a writeable event stream.
/// </summary>
IEventStream<TDoc> IEventIdentityStrategy<string>.AppendToStream<TDoc>(TDoc document, DocumentSessionBase session, string id, long version, CancellationToken cancellation)
{
    var appendAction = session.Events.Append(id);

    // Optimistic concurrency: the server-side version must still match what was read
    appendAction.ExpectedVersionOnServer = version;

    return new EventStream<TDoc>(_store.Events, id, document, cancellation, appendAction);
}
/// <summary>
/// Translates a LINQ predicate into a parsed WHERE clause, adds it to this
/// query's filters, and recompiles the local query state.
/// </summary>
public void ApplyFiltering<T>(DocumentSessionBase session, Expression<Func<T, bool>> expression)
{
    // Build a full IQueryable expression so the Marten parser can interpret the predicate
    var filtered = session.Query<T>().Where(expression);
    var parsed = MartenQueryParser.Flyweight.GetParsedQuery(filtered.Expression);

    // Exactly one WHERE clause is expected from the single .Where() call above
    var whereClause = parsed.BodyClauses.OfType<WhereClause>().Single();
    WhereClauses.Add(whereClause);

    CompileLocal(session);
}
/// <summary>
/// Releases every page's session reference and disposes the underlying session.
/// Safe to call more than once.
/// </summary>
public async ValueTask CloseSession()
{
    foreach (var page in _pages)
    {
        page.ReleaseSession();
    }

    // FIX: guard against double-close (a second call previously threw a
    // NullReferenceException) and avoid capturing the caller's sync context.
    if (_session != null)
    {
        await _session.DisposeAsync().ConfigureAwait(false);
        _session = null;
    }
}
/// <summary>
/// Begins a new Guid-identified event stream with the supplied initial events.
/// </summary>
internal StreamAction StartStream(DocumentSessionBase session, Guid id, params object[] events)
{
    EnsureAsGuidStorage(session);

    // FIX: validate the stream id up front, consistent with the other
    // Guid-identified stream operations in this type
    if (id == Guid.Empty)
    {
        throw new ArgumentOutOfRangeException(nameof(id), "Cannot use an empty Guid as the stream id");
    }

    var stream = StreamAction.Start(this, id, events);
    session.WorkTracker.Streams.Add(stream);
    return stream;
}
/// <summary>
/// Begins a new string-keyed event stream with the supplied initial events.
/// </summary>
internal StreamAction StartStream(DocumentSessionBase session, string streamKey, params object[] events)
{
    EnsureAsStringStorage(session);

    // FIX: reject null/empty keys up front, consistent with the other
    // string-keyed stream operations
    if (streamKey.IsEmpty())
    {
        throw new ArgumentOutOfRangeException(nameof(streamKey), "The stream key cannot be null or empty");
    }

    var stream = StreamAction.Start(streamKey, events);
    session.UnitOfWork.Streams.Add(stream);
    return stream;
}
/// <summary>
/// Synchronous event persistence: reserves sequence numbers, queues stream
/// insert/update and event append operations for every pending stream, then
/// applies inline projections.
/// </summary>
internal void ProcessEvents(DocumentSessionBase session)
{
    if (!session.WorkTracker.Streams.Any())
    {
        return;
    }

    // Make sure the event storage exists when auto schema creation is enabled
    if (Options.AutoCreateSchemaObjects != AutoCreate.None)
    {
        session.Database.EnsureStorageExists(typeof(IEvent));
    }

    var storage = session.EventStorage();

    // Reserve one sequence number per pending event in a single round trip
    var fetcher = new EventSequenceFetcher(this, session.WorkTracker.Streams.Sum(x => x.Events.Count));
    var sequences = session.ExecuteHandler(fetcher);

    foreach (var stream in session.WorkTracker.Streams)
    {
        stream.TenantId ??= session.TenantId;

        if (stream.ActionType == StreamActionType.Start)
        {
            stream.PrepareEvents(0, this, sequences, session);
            session.QueueOperation(storage.InsertStream(stream));
        }
        else
        {
            // Appending: look up the current stream version
            var handler = storage.QueryForStream(stream);
            var state = session.ExecuteHandler(handler);

            if (state == null)
            {
                // Stream does not exist yet; treat this append as a start
                stream.PrepareEvents(0, this, sequences, session);
                session.QueueOperation(storage.InsertStream(stream));
            }
            else
            {
                stream.PrepareEvents(state.Version, this, sequences, session);
                session.QueueOperation(storage.UpdateStreamVersion(stream));
            }
        }

        foreach (var @event in stream.Events)
        {
            session.QueueOperation(storage.AppendEvent(this, session, stream, @event));
        }
    }

    // Run all inline projections against the captured streams
    foreach (var projection in _inlineProjections.Value)
    {
        projection.Apply(session, session.WorkTracker.Streams.ToList());
    }
}
/// <summary>
/// Fetches the aggregate document for writing while enforcing that the stream's
/// current version matches <paramref name="expectedStartingVersion"/> (optimistic
/// concurrency). Returns a writeable event stream positioned at that version.
/// </summary>
public async Task<IEventStream<TDoc>> FetchForWriting(DocumentSessionBase session, TId id, long expectedStartingVersion, CancellationToken cancellation = default)
{
    await _identityStrategy.EnsureAggregateStorageExists<TDoc>(session, cancellation).ConfigureAwait(false);
    await session.Database.EnsureStorageExistsAsync(typeof(TDoc), cancellation).ConfigureAwait(false);

    // Single batched command: read the stream version, then load the aggregate document
    var command = _identityStrategy.BuildCommandForReadingVersionForStream(id, false);
    var builder = new CommandBuilder(command);
    builder.Append(";");

    var handler = new LoadByIdHandler<TDoc, TId>(_storage, id);
    handler.ConfigureCommand(builder, session);

    // version stays 0 when the stream does not exist yet
    long version = 0;
    try
    {
        using var reader = await session.ExecuteReaderAsync(builder.Compile(), cancellation).ConfigureAwait(false);
        if (await reader.ReadAsync(cancellation).ConfigureAwait(false))
        {
            version = await reader.GetFieldValueAsync<long>(0, cancellation).ConfigureAwait(false);
        }

        if (expectedStartingVersion != version)
        {
            throw new ConcurrencyException(
                $"Expected the existing version to be {expectedStartingVersion}, but was {version}",
                typeof(TDoc), id);
        }

        // Second result set carries the aggregate document
        await reader.NextResultAsync(cancellation).ConfigureAwait(false);
        var document = await handler.HandleAsync(reader, session, cancellation).ConfigureAwait(false);

        // New stream when no version was found, otherwise append to the existing one
        return (version == 0
            ? _identityStrategy.StartStream<TDoc>(document, session, id, cancellation)
            : _identityStrategy.AppendToStream<TDoc>(document, session, id, version, cancellation));
    }
    catch (Exception e)
    {
        // Translate locked-row failures into a domain-specific exception
        if (e.Message.Contains(MartenCommandException.MaybeLockedRowsMessage))
        {
            throw new StreamLockedException(id, e.InnerException);
        }

        throw;
    }
}
/// <summary>
/// Persists all pending event streams: reserves sequence numbers, queues stream
/// insert/update and event append operations, then applies inline projections.
/// </summary>
internal async Task ProcessEventsAsync(DocumentSessionBase session, CancellationToken token)
{
    if (!session._workTracker.Streams.Any())
    {
        return;
    }

    // TODO -- we'll optimize this later to batch up queries to the database
    var fetcher = new EventSequenceFetcher(this, session.WorkTracker.Streams.Sum(x => x.Events.Count));

    // FIX: library-internal awaits use ConfigureAwait(false) throughout so no
    // caller synchronization context is captured (consistent with the newer
    // async version of this method)
    var sequences = await session.ExecuteHandlerAsync(fetcher, token).ConfigureAwait(false);

    var storage = session.EventStorage();

    foreach (var stream in session.WorkTracker.Streams)
    {
        stream.TenantId ??= session.Tenant.TenantId;

        if (stream.ActionType == StreamActionType.Start)
        {
            stream.PrepareEvents(0, this, sequences, session);
            session.QueueOperation(storage.InsertStream(stream));
        }
        else
        {
            // Appending: look up the current stream version
            var handler = storage.QueryForStream(stream);
            var state = await session.ExecuteHandlerAsync(handler, token).ConfigureAwait(false);

            if (state == null)
            {
                // Stream does not exist yet; treat this append as a start
                stream.PrepareEvents(0, this, sequences, session);
                session.QueueOperation(storage.InsertStream(stream));
            }
            else
            {
                stream.PrepareEvents(state.Version, this, sequences, session);
                session.QueueOperation(storage.UpdateStreamVersion(stream));
            }
        }

        foreach (var @event in stream.Events)
        {
            session.QueueOperation(storage.AppendEvent(this, session, stream, @event));
        }
    }

    foreach (var projection in _inlineProjections.Value)
    {
        await projection.ApplyAsync(session, session.WorkTracker.Streams.ToList(), token).ConfigureAwait(false);
    }
}
// Appends a single raw event row directly via the mt_append_event database function,
// bypassing the normal unit-of-work pipeline.
// NOTE(review): the SQL is built via string interpolation rather than parameters —
// acceptable only for trusted, internally generated values; never use this pattern
// with external input (SQL injection risk).
static void AppendRawEvent(DocumentSessionBase session, dynamic @event)
{
    using var conn = session.Tenant.OpenConnection();
    conn.Execute(
        $@"select * from {session.Options.Events.DatabaseSchemaName}.mt_append_event( stream := '{@event.StreamId}', stream_type := null, tenantid := '{Tenancy.DefaultTenantId}', event_ids := '{{""{@event.Id}""}}', event_types := '{{""{@event.EventTypeName}""}}', dotnet_types := '{{""{@event.DotnetTypeName}""}}', bodies := '{{""{JsonConvert.SerializeObject(@event.Body).Replace("\"", "\\\"")}""}}')");
}
/// <summary>
/// Builds an update batch for the given event range, queuing storage operations
/// strictly in order on a single-consumer action block. The range's progression
/// operation is posted first.
/// </summary>
internal ProjectionUpdateBatch(EventGraph events, DocumentSessionBase session, EventRange range)
{
    _session = session;
    Range = range;

    // Single consumer, strict ordering: operations must execute in queue order
    var options = new ExecutionDataflowBlockOptions
    {
        MaxDegreeOfParallelism = 1,
        EnsureOrdered = true
    };
    Queue = new ActionBlock<IStorageOperation>(processOperation, options);

    startNewPage(session);

    Queue.Post(range.BuildProgressionOperation(events));
}
/// <summary>
/// Begins a brand new string-keyed event stream with the supplied initial events.
/// </summary>
internal StreamAction StartStream(DocumentSessionBase session, string streamKey, params object[] events)
{
    EnsureAsStringStorage(session);

    if (streamKey.IsEmpty())
    {
        throw new ArgumentOutOfRangeException(nameof(streamKey), "The stream key cannot be null or empty");
    }

    var action = StreamAction.Start(this, streamKey, events);
    session.WorkTracker.Streams.Add(action);
    return action;
}
/// <summary>
/// Starts tracking a new, empty string-keyed stream on the session.
/// NOTE(review): the <paramref name="events"/> params argument is accepted but
/// never used by this method — confirm whether callers rely on it being ignored
/// or whether the events were meant to be added to the stream action.
/// </summary>
internal StreamAction StartEmptyStream(DocumentSessionBase session, string key, params object[] events)
{
    EnsureAsStringStorage(session);

    if (key.IsEmpty())
    {
        throw new ArgumentOutOfRangeException(nameof(key), "Cannot use an empty or null string as the stream key");
    }

    var stream = new StreamAction(key, StreamActionType.Start);
    session.WorkTracker.Streams.Add(stream);
    return stream;
}
/// <summary>
/// Begins a brand new Guid-identified event stream with the supplied initial events.
/// </summary>
internal StreamAction StartStream(DocumentSessionBase session, Guid id, params object[] events)
{
    EnsureAsGuidStorage(session);

    if (id == Guid.Empty)
    {
        throw new ArgumentOutOfRangeException(nameof(id), "Cannot use an empty Guid as the stream id");
    }

    var action = StreamAction.Start(this, id, events);
    session.WorkTracker.Streams.Add(action);
    return action;
}
/// <summary>
/// Appends raw events to a string-keyed stream tracked by the session's unit of work,
/// folding into an existing in-flight stream action when one exists.
/// </summary>
internal StreamAction Append(DocumentSessionBase session, string stream, params object[] events)
{
    EnsureAsStringStorage(session);

    // FIX: reject null/empty stream keys up front, consistent with the
    // validating string-keyed append overload
    if (stream.IsEmpty())
    {
        throw new ArgumentOutOfRangeException(nameof(stream), "The stream key cannot be null or empty");
    }

    if (session.UnitOfWork.TryFindStream(stream, out var eventStream))
    {
        eventStream.AddEvents(events);
    }
    else
    {
        eventStream = StreamAction.Append(stream, events);
        session.UnitOfWork.Streams.Add(eventStream);
    }

    return eventStream;
}
/// <summary>
/// Appends events to a string-keyed stream tracked by the session, wrapping each
/// raw payload into an event envelope first.
/// </summary>
internal StreamAction Append(DocumentSessionBase session, string stream, params object[] events)
{
    EnsureAsStringStorage(session);

    // Wrap the raw payloads into event envelopes
    var wrapped = events.Select(BuildEvent).ToArray();

    if (!session.WorkTracker.TryFindStream(stream, out var eventStream))
    {
        eventStream = StreamAction.Append(stream, wrapped);
        session.WorkTracker.Streams.Add(eventStream);
    }
    else
    {
        eventStream.AddEvents(wrapped);
    }

    return eventStream;
}
/// <summary>
/// Determines the storage operation (upsert, delete, or null for no-op) produced
/// by applying the slice's events to its aggregate.
/// </summary>
public async Task<IStorageOperation?> DetermineOperation(DocumentSessionBase session, EventSlice<TDoc, TId> slice, CancellationToken cancellation, ProjectionLifecycle lifecycle = ProjectionLifecycle.Inline)
{
    var aggregate = slice.Aggregate;

    // Inline lifecycle: the slice may not carry the aggregate, so load the stored snapshot
    if (slice.Aggregate == null && lifecycle == ProjectionLifecycle.Inline)
    {
        aggregate = await Storage.LoadAsync(slice.Id, session, cancellation).ConfigureAwait(false);
    }

    // Did the aggregate exist before these events were applied?
    var exists = aggregate != null;

    foreach (var @event in slice.Events())
    {
        try
        {
            aggregate = await ApplyEvent(session, slice, @event, aggregate, cancellation).ConfigureAwait(false);
        }
        // Database-level failures (Marten/Npgsql) propagate untouched; everything
        // else is wrapped so the offending event is identifiable
        catch (Exception e) when (!(e is MartenCommandException) && !(e is NpgsqlException))
        {
            throw new ApplyEventException(@event, e);
        }
    }

    if (aggregate != null)
    {
        Storage.SetIdentity(aggregate, slice.Id);
    }

    // A null aggregate after applying events means delete-if-existed, otherwise no-op
    if (aggregate == null)
    {
        return exists ? Storage.DeleteForId(slice.Id, slice.Tenant) : null;
    }

    return Storage.Upsert(aggregate, session, slice.Tenant);
}
/// <summary>
/// Builds a daemon-mode update batch for the given event range, queuing storage
/// operations strictly in order on a single, cancellable consumer. The range's
/// progression operation is posted first.
/// </summary>
internal ProjectionUpdateBatch(EventGraph events, DaemonSettings settings, DocumentSessionBase session, EventRange range, CancellationToken token, ShardExecutionMode mode)
{
    _settings = settings;
    _session = session;
    _token = token;
    _mode = mode;
    Range = range;

    // Single consumer, strict ordering, honoring the daemon's cancellation token
    var options = new ExecutionDataflowBlockOptions
    {
        MaxDegreeOfParallelism = 1,
        EnsureOrdered = true,
        CancellationToken = token
    };
    Queue = new ActionBlock<IStorageOperation>(processOperation, options);

    startNewPage(session);

    Queue.Post(range.BuildProgressionOperation(events));
}
/// <summary>
/// Event store facade for a session; chooses the event selector that matches the
/// store's configured stream identity (Guid vs string).
/// </summary>
public EventStore(DocumentSessionBase session, DocumentStore store, ITenant tenant)
{
    _session = session;
    _store = store;
    _tenant = tenant;

    // TODO -- we can make much more of this lazy
    StreamIdentity = _store.Events.StreamIdentity;

    if (StreamIdentity != StreamIdentity.AsGuid)
    {
        _selector = new StringIdentifiedEventSelector(_store.Events, _store.Serializer);
    }
    else
    {
        _selector = new EventSelector(_store.Events, _store.Serializer);
    }
}
/// <summary>
/// When the session has pending streams, builds an update batch that writes a
/// Tombstone placeholder event for every pending event, reusing each original
/// event's sequence, version, and tenant. Returns false (with a null batch)
/// when there is nothing pending.
/// NOTE(review): presumably this backfills reserved sequence numbers after a
/// failed append so the sequence has no unexplained gaps — confirm with callers.
/// </summary>
internal bool TryCreateTombstoneBatch(DocumentSessionBase session, out UpdateBatch batch)
{
    if (session.UnitOfWork.Streams.Any())
    {
        var stream = StreamAction.ForTombstone();
        var tombstone = new Tombstone();
        var mapping = EventMappingFor<Tombstone>();

        var operations = new List<IStorageOperation>();
        var storage = session.EventStorage();

        var dotNetTypeName = DotnetTypeNameFor(typeof(Tombstone));

        // The tombstone stream itself must exist before events are appended to it
        operations.Add(_establishTombstone.Value);

        // One tombstone event per pending event, copying the original's
        // sequence/version/tenant onto the placeholder
        var tombstones = session.UnitOfWork.Streams
            .SelectMany(x => x.Events)
            .Select(x => new Event<Tombstone>(tombstone)
            {
                Sequence = x.Sequence,
                Version = x.Version,
                TenantId = x.TenantId,
                StreamId = EstablishTombstoneStream.StreamId,
                StreamKey = EstablishTombstoneStream.StreamKey,
                Id = CombGuidIdGeneration.NewGuid(),
                EventTypeName = mapping.EventTypeName,
                DotNetTypeName = dotNetTypeName
            })
            .Select(e => storage.AppendEvent(this, session, stream, e));

        operations.AddRange(tombstones);

        batch = new UpdateBatch(operations);

        return (true);
    }

    batch = null;
    return (false);
}