/// <summary>
/// Wires up the fakes: the event store hands out the fake subscription, the
/// consumer accepts every event, and the formatter yields the prepared envelope.
/// </summary>
public EventConsumerGrainTests()
{
    grainState.Value.Position = initialPosition;

    consumerName = eventConsumer.GetType().Name;

    A.CallTo(() => eventStore.CreateSubscription(A<IEventSubscriber>._, A<string>._, A<string>._))
        .Returns(eventSubscription);

    A.CallTo(() => eventConsumer.Name)
        .Returns(consumerName);

    A.CallTo(() => eventConsumer.Handles(A<StoredEvent>._))
        .Returns(true);

    A.CallTo(() => formatter.Parse(eventData, null))
        .Returns(envelope);

    sut = new MyEventConsumerGrain(
        x => eventConsumer,
        grainState,
        eventStore,
        formatter,
        log);
}
/// <summary>
/// Wires up the fakes: snapshot persistence (capturing the apply callback so
/// ReadAsync can replay the current state), subscription creation and parsing.
/// </summary>
public EventConsumerGrainTests()
{
    state.Position = initialPosition;

    consumerName = eventConsumer.GetType().Name;

    // Capture the apply callback that the grain registers for snapshots,
    // so that the fake ReadAsync below can feed the current state back in.
    A.CallTo(() => store.WithSnapshots(A<Type>.Ignored, consumerName, A<Func<EventConsumerState, Task>>.Ignored))
        .Invokes(new Action<Type, string, Func<EventConsumerState, Task>>((type, id, callback) =>
        {
            apply = callback;
        }))
        .Returns(persistence);

    A.CallTo(() => eventStore.CreateSubscription(A<IEventSubscriber>.Ignored, A<string>.Ignored, A<string>.Ignored))
        .Returns(eventSubscription);

    A.CallTo(() => eventConsumer.Name)
        .Returns(consumerName);

    A.CallTo(() => persistence.ReadAsync(EtagVersion.Any))
        .Invokes(new Action<long>(version => apply(state)));

    A.CallTo(() => persistence.WriteSnapshotAsync(A<EventConsumerState>.Ignored))
        .Invokes(new Action<EventConsumerState>(snapshot => state = snapshot));

    A.CallTo(() => formatter.Parse(eventData, null))
        .Returns(envelope);

    sut = new MyEventConsumerGrain(
        x => eventConsumer,
        store,
        eventStore,
        formatter,
        log);
}
/// <summary>
/// Registers the given events in the fake event store starting at the given
/// offset and teaches the formatter to parse each of them.
/// </summary>
private void SetupEventStore(MyEvent[] events, int eventOffset, int readPosition = 0)
{
    var storedEvents = new List<StoredEvent>();

    for (var index = 0; index < events.Length; index++)
    {
        var position = eventOffset + index;

        var data = new EventData("Type", new EnvelopeHeaders(), "Payload");
        var stored = new StoredEvent(key.ToString(), position.ToString(CultureInfo.InvariantCulture), position, data);

        storedEvents.Add(stored);

        var parsed = new Envelope<IEvent>(events[index]);

        A.CallTo(() => eventDataFormatter.Parse(stored))
            .Returns(parsed);

        A.CallTo(() => eventDataFormatter.ParseIfKnown(stored))
            .Returns(parsed);
    }

    A.CallTo(() => eventStore.QueryAsync(key.ToString(), readPosition, A<CancellationToken>._))
        .Returns(storedEvents);
}
/// <summary>
/// Registers one stored stream per domain id in the fake event store and
/// teaches the formatter to parse every stored event.
/// </summary>
private void SetupEventStore(Dictionary<DomainId, List<MyEvent>> streams)
{
    var storedStreams = new Dictionary<string, IReadOnlyList<StoredEvent>>();

    foreach (var (id, stream) in streams)
    {
        var storedStream = new List<StoredEvent>();

        var i = 0;

        foreach (var @event in stream)
        {
            var eventData = new EventData("Type", new EnvelopeHeaders(), "Payload");

            // Use the invariant culture for event positions (CA1305), consistent
            // with the array-based SetupEventStore overload.
            var eventStored = new StoredEvent(id.ToString(), i.ToString(System.Globalization.CultureInfo.InvariantCulture), i, eventData);

            storedStream.Add(eventStored);

            A.CallTo(() => eventDataFormatter.Parse(eventStored))
                .Returns(new Envelope<IEvent>(@event));

            A.CallTo(() => eventDataFormatter.ParseIfKnown(eventStored))
                .Returns(new Envelope<IEvent>(@event));

            i++;
        }

        storedStreams[id.ToString()] = storedStream;
    }

    var streamNames = streams.Keys.Select(x => x.ToString()).ToArray();

    A.CallTo(() => eventStore.QueryManyAsync(A<IEnumerable<string>>.That.IsSameSequenceAs(streamNames)))
        .Returns(storedStreams);
}
/// <summary>
/// Reads all events from the backup archive, parses each one and forwards it
/// to the event handler.
/// </summary>
private async Task ReadEventsAsync(BackupReader reader)
{
    await reader.ReadEventsAsync(streamNameResolver, async storedEvent =>
    {
        var parsedEvent = eventDataFormatter.Parse(storedEvent.Data);

        await HandleEventAsync(reader, storedEvent, parsedEvent);
    });

    Log("Reading events completed.");
}
/// <summary>
/// Parses a stored event, returning null when the event type is no longer
/// known to the type registry.
/// </summary>
private Envelope<IEvent>? ParseKnownEvent(StoredEvent storedEvent)
{
    try
    {
        return eventDataFormatter.Parse(storedEvent.Data);
    }
    catch (TypeNameNotFoundException)
    {
        // Obsolete event type: skip it instead of failing the whole read.
        return null;
    }
}
/// <summary>
/// Events whose type is unknown must be skipped, but still advance the version.
/// </summary>
public async Task Should_ignore_old_events()
{
    var oldEvent = new StoredEvent("1", "1", 0, new EventData("Type", new EnvelopeHeaders(), "Payload"));

    A.CallTo(() => eventStore.QueryAsync(key, 0))
        .Returns(new List<StoredEvent> { oldEvent });

    // Simulate an event type that has been removed from the codebase.
    A.CallTo(() => eventDataFormatter.Parse(oldEvent.Data, null))
        .Throws(new TypeNameNotFoundException());

    var readEvents = new List<IEvent>();

    var persistence = sut.WithEventSourcing(None.Type, key, x => readEvents.Add(x.Payload));

    await persistence.ReadAsync();

    Assert.Empty(readEvents);
    Assert.Equal(0, persistence.Version);
}
/// <summary>
/// Wires up the fakes so that the subscription echoes itself as sender and
/// batched events are dispatched one by one to the single-event overload.
/// </summary>
public EventConsumerGrainTests()
{
    grainState.Value.Position = initialPosition;

    consumerName = eventConsumer.GetType().Name;

    A.CallTo(() => eventStore.CreateSubscription(A<IEventSubscriber>._, A<string>._, A<string>._))
        .Returns(eventSubscription);

    A.CallTo(() => eventConsumer.Name)
        .Returns(consumerName);

    A.CallTo(() => eventConsumer.Handles(A<StoredEvent>._))
        .Returns(true);

    // Forward each event of a batch to the single-event overload, so tests
    // only have to verify the per-event calls.
    A.CallTo(() => eventConsumer.On(A<IEnumerable<Envelope<IEvent>>>._))
        .Invokes((IEnumerable<Envelope<IEvent>> batch) =>
        {
            foreach (var batchedEvent in batch)
            {
                eventConsumer.On(batchedEvent).Wait();
            }
        });

    A.CallTo(() => eventSubscription.Sender)
        .Returns(eventSubscription);

    A.CallTo(() => formatter.Parse(eventData))
        .Returns(envelope);

    sut = new MyEventConsumerGrain(
        x => eventConsumer,
        grainState,
        eventStore,
        formatter,
        log);
}
/// <summary>
/// Loads the latest comment events for this grain's stream and replays them
/// into the in-memory event list.
/// </summary>
protected override async Task OnActivateAsync(string key)
{
    streamName = $"comments-{key}";

    var storedEvents = await eventStore.QueryLatestAsync(streamName, 100);

    foreach (var storedEvent in storedEvents)
    {
        var parsed = eventDataFormatter.Parse(storedEvent.Data);

        // Track the stream number of the last replayed event as the version.
        version = storedEvent.EventStreamNumber;

        events.Add(parsed.To<CommentsEvent>());
    }
}
/// <summary>
/// Parses a stored event and stamps it with its position and stream number;
/// returns null when the event type is no longer known.
/// </summary>
private Envelope<IEvent>? ParseKnownEvent(StoredEvent storedEvent)
{
    try
    {
        var parsed = eventDataFormatter.Parse(storedEvent.Data);

        parsed.SetEventPosition(storedEvent.EventPosition);
        parsed.SetEventStreamNumber(storedEvent.EventStreamNumber);

        return parsed;
    }
    catch (TypeNameNotFoundException)
    {
        // Obsolete event type: skip it instead of failing the whole read.
        return null;
    }
}
/// <summary>
/// Parses a stored event and stamps it with its position and stream number;
/// logs and returns null when the event type is no longer known.
/// </summary>
private Envelope<IEvent> ParseKnownEvent(StoredEvent message)
{
    try
    {
        var parsed = eventDataFormatter.Parse(message.Data);

        parsed.SetEventPosition(message.EventPosition);
        parsed.SetEventStreamNumber(message.EventStreamNumber);

        return parsed;
    }
    catch (TypeNameNotFoundException)
    {
        // Obsolete event type: record it for diagnostics and skip it.
        log.LogDebug(w => w.WriteProperty("oldEventFound", message.Data.Type));

        return null;
    }
}
// Restores all events from a backup archive into the event store, remapping
// the actor and optionally renaming the app on the fly.
private async Task ReadEventsAsync(BackupReader reader)
{
    await reader.ReadEventsAsync(streamNameResolver, async (storedEvent) =>
    {
        var @event = eventDataFormatter.Parse(storedEvent.Data);

        // Every restored event is attributed to the user who started the restore.
        if (@event.Payload is SquidexEvent squidexEvent)
        {
            squidexEvent.Actor = actor;
        }

        if (@event.Payload is AppCreated appCreated)
        {
            // The restored app id becomes the target of all later handler calls.
            CurrentJob.AppId = appCreated.AppId.Id;

            if (!string.IsNullOrWhiteSpace(CurrentJob.NewAppName))
            {
                appCreated.Name = CurrentJob.NewAppName;
            }
        }

        // Rewrite the app name on every app event when the app is being renamed.
        if (@event.Payload is AppEvent appEvent && !string.IsNullOrWhiteSpace(CurrentJob.NewAppName))
        {
            appEvent.AppId = new NamedId<Guid>(appEvent.AppId.Id, CurrentJob.NewAppName);
        }

        foreach (var handler in handlers)
        {
            await handler.RestoreEventAsync(@event, CurrentJob.AppId, reader, actor);
        }

        // Re-serialize the (possibly mutated) event and append it to the store.
        var eventData = eventDataFormatter.ToEventData(@event, @event.Headers.CommitId());
        var eventCommit = new List<EventData>
        {
            eventData
        };

        await eventStore.AppendAsync(Guid.NewGuid(), storedEvent.StreamName, eventCommit);

        // NOTE(review): this logs once per event; presumably the 'true' flag
        // replaces the previous progress line — confirm against Log's signature.
        Log($"Read {reader.ReadEvents} events and {reader.ReadAttachments} attachments.", true);
    });

    Log("Reading events completed.");
}
/// <summary>
/// Registers the given events in the fake event store starting at the given
/// offset and teaches the formatter to parse each event payload.
/// </summary>
private void SetupEventStore(MyEvent[] events, int eventOffset = 0, int readPosition = 0)
{
    var storedEvents = new List<StoredEvent>();

    for (var index = 0; index < events.Length; index++)
    {
        var position = eventOffset + index;

        var data = new EventData();
        var stored = new StoredEvent(position.ToString(), position, data);

        storedEvents.Add(stored);

        A.CallTo(() => eventDataFormatter.Parse(data, true))
            .Returns(new Envelope<IEvent>(events[index]));
    }

    A.CallTo(() => eventStore.GetEventsAsync(key, readPosition))
        .Returns(storedEvents);
}
/// <summary>
/// Replays all app events through the given rule and enqueues the resulting
/// rule jobs, persisting the read position after each event so the run can
/// resume. Always clears the job state and reminder when done or stopping.
/// </summary>
private async Task ProcessAsync(State job, CancellationToken ct)
{
    try
    {
        // Keep-alive reminder so the grain is reactivated if the silo restarts.
        currentReminder = await RegisterOrUpdateReminder("KeepAlive", TimeSpan.Zero, TimeSpan.FromMinutes(2));

        var rules = await appProvider.GetRulesAsync(Key);

        var rule = rules.Find(x => x.Id == job.RuleId);

        if (rule == null)
        {
            throw new InvalidOperationException("Cannot find rule.");
        }

        await eventStore.QueryAsync(async storedEvent =>
        {
            var @event = eventDataFormatter.Parse(storedEvent.Data);

            var jobs = await ruleService.CreateJobsAsync(rule.RuleDef, rule.Id, @event);

            // Renamed from 'job' to avoid shadowing the method parameter (CS0136).
            foreach (var ruleJob in jobs)
            {
                await ruleEventRepository.EnqueueAsync(ruleJob, ruleJob.Created, ct);
            }

            // Persist progress after every event so a restart resumes from here.
            job.Position = storedEvent.EventPosition;

            await state.WriteAsync();
        }, SquidexHeaders.AppId, Key.ToString(), job.Position, ct);
    }
    catch (OperationCanceledException)
    {
        return;
    }
    catch (Exception ex)
    {
        log.LogError(ex, w => w
            .WriteProperty("action", "runRule") // Fixed typo: was "runeRule".
            .WriteProperty("status", "failed")
            .WriteProperty("ruleId", job.RuleId?.ToString()));
    }
    finally
    {
        // When stopping, keep the job state so the run can continue after restart.
        if (!isStopping)
        {
            job.RuleId = null;
            job.Position = null;

            await state.WriteAsync();

            if (currentReminder != null)
            {
                await UnregisterReminder(currentReminder);

                currentReminder = null;
            }

            currentTaskToken = null;
        }
    }
}
// Runs a full backup for the current app: streams every event into the backup
// archive, lets each handler contribute its data, stores the user mapping and
// finally uploads the finished archive. Job status and timestamps are always
// persisted in the finally block.
private async Task ProcessAsync(BackupJob job, RefToken actor, CancellationToken ct)
{
    var handlers = CreateHandlers();

    // Timestamp of the last periodic state write; used to throttle progress updates.
    var lastTimestamp = job.Started;

    try
    {
        var appId = DomainId.Create(Key);

        using (var stream = backupArchiveLocation.OpenStream(job.Id))
        {
            using (var writer = await backupArchiveLocation.OpenWriterAsync(stream))
            {
                await writer.WriteVersionAsync();

                var userMapping = new UserMapping(actor);

                var context = new BackupContext(appId, userMapping, writer);

                await eventStore.QueryAsync(async storedEvent =>
                {
                    var @event = eventDataFormatter.Parse(storedEvent);

                    // Remember every actor so users can be remapped on restore.
                    if (@event.Payload is SquidexEvent squidexEvent && squidexEvent.Actor != null)
                    {
                        context.UserMapping.Backup(squidexEvent.Actor);
                    }

                    foreach (var handler in handlers)
                    {
                        await handler.BackupEventAsync(@event, context);
                    }

                    writer.WriteEvent(storedEvent);

                    job.HandledEvents = writer.WrittenEvents;
                    job.HandledAssets = writer.WrittenAttachments;

                    // Persist progress at most once per update interval.
                    lastTimestamp = await WritePeriodically(lastTimestamp);
                }, GetFilter(), null, ct);

                foreach (var handler in handlers)
                {
                    ct.ThrowIfCancellationRequested();

                    await handler.BackupAsync(context);
                }

                foreach (var handler in handlers)
                {
                    ct.ThrowIfCancellationRequested();

                    await handler.CompleteBackupAsync(context);
                }

                await userMapping.StoreAsync(writer, userResolver);
            }

            // Rewind so the upload reads the archive from the beginning.
            stream.Position = 0;

            ct.ThrowIfCancellationRequested();

            await backupArchiveStore.UploadAsync(job.Id, stream, ct);
        }

        job.Status = JobStatus.Completed;
    }
    catch (OperationCanceledException)
    {
        // Cancelled by the user: remove the partial backup instead of failing it.
        await RemoveAsync(job);
    }
    catch (Exception ex)
    {
        log.LogError(ex, job.Id.ToString(), (ctx, w) => w
            .WriteProperty("action", "makeBackup")
            .WriteProperty("status", "failed")
            .WriteProperty("backupId", ctx));

        job.Status = JobStatus.Failed;
    }
    finally
    {
        job.Stopped = clock.GetCurrentInstant();

        await state.WriteAsync();

        currentJobToken?.Dispose();
        currentJobToken = null;

        currentJob = null;
    }
}
// Rebuilds the app-name and contributor indexes by replaying all app events
// from the event store, then pushes the rebuilt maps into the index grains.
private async Task RebuildAppIndexes()
{
    // Current app name -> app id mapping.
    var appsByName = new Dictionary<string, Guid>();

    // Contributor id -> set of app ids the contributor belongs to.
    var appsByUser = new Dictionary<string, HashSet<Guid>>();

    // True when the name is taken and (when consistent) maps to exactly this app id.
    bool HasApp(NamedId<Guid> appId, bool consistent, out Guid id)
    {
        return (appsByName.TryGetValue(appId.Name, out id) && (!consistent || id == appId.Id));
    }

    // Set of app ids for the given contributor, created on demand.
    HashSet<Guid> Index(string contributorId)
    {
        return (appsByUser.GetOrAddNew(contributorId));
    }

    // Removes an app from both indexes when it is (still) present.
    void RemoveApp(NamedId<Guid> appId, bool consistent)
    {
        if (HasApp(appId, consistent, out var id))
        {
            foreach (var apps in appsByUser.Values)
            {
                apps.Remove(id);
            }

            appsByName.Remove(appId.Name);
        }
    }

    await eventStore.QueryAsync(storedEvent =>
    {
        var @event = eventDataFormatter.Parse(storedEvent.Data);

        switch (@event.Payload)
        {
            case AppCreated appCreated:
                {
                    // A new app with the same name supersedes a stale older entry.
                    RemoveApp(appCreated.AppId, false);

                    appsByName[appCreated.Name] = appCreated.AppId.Id;
                    break;
                }

            case AppContributorAssigned appContributorAssigned:
                {
                    // Only index contributors of apps that are still current.
                    if (HasApp(appContributorAssigned.AppId, true, out _))
                    {
                        Index(appContributorAssigned.ContributorId).Add(appContributorAssigned.AppId.Id);
                    }

                    break;
                }

            case AppContributorRemoved contributorRemoved:
                Index(contributorRemoved.ContributorId).Remove(contributorRemoved.AppId.Id);
                break;

            case AppArchived appArchived:
                RemoveApp(appArchived.AppId, true);
                break;
        }

        return (TaskHelper.Done);
    }, "^app\\-"); // Replay only streams whose name starts with "app-".

    await indexApps.RebuildAsync(appsByName);

    foreach (var kvp in appsByUser)
    {
        await indexApps.RebuildByContributorsAsync(kvp.Key, kvp.Value);
    }
}
// Starts a new backup: streams all app events into an event archive, embedding
// asset binaries for asset events, then uploads the archive to the asset store.
// Progress is persisted periodically; the temp archive is always cleaned up.
public async Task RunAsync()
{
    if (currentTask != null)
    {
        throw new DomainException("Another backup process is already running.");
    }

    if (state.Jobs.Count >= MaxBackups)
    {
        throw new DomainException($"You cannot have more than {MaxBackups} backups.");
    }

    var job = new BackupStateJob { Id = Guid.NewGuid(), Started = clock.GetCurrentInstant() };

    currentTask = new CancellationTokenSource();
    currentJob = job;

    // Timestamp of the last progress write; used to throttle state updates.
    var lastTimestamp = job.Started;

    state.Jobs.Insert(0, job);

    await WriteAsync();

    try
    {
        using (var stream = await backupArchiveLocation.OpenStreamAsync(job.Id))
        {
            using (var writer = new EventStreamWriter(stream))
            {
                await eventStore.QueryAsync(async @event =>
                {
                    var eventData = @event.Data;

                    // Asset events carry a binary that must be downloaded and
                    // written into the archive next to the event itself.
                    if (eventData.Type == "AssetCreatedEvent" || eventData.Type == "AssetUpdatedEvent")
                    {
                        var parsedEvent = eventDataFormatter.Parse(eventData);

                        var assetVersion = 0L;
                        var assetId = Guid.Empty;

                        if (parsedEvent.Payload is AssetCreated assetCreated)
                        {
                            assetId = assetCreated.AssetId;

                            assetVersion = assetCreated.FileVersion;
                        }

                        // NOTE(review): 'asetUpdated' looks like a typo for 'assetUpdated'.
                        if (parsedEvent.Payload is AssetUpdated asetUpdated)
                        {
                            assetId = asetUpdated.AssetId;

                            assetVersion = asetUpdated.FileVersion;
                        }

                        await writer.WriteEventAsync(eventData, async attachmentStream =>
                        {
                            await assetStore.DownloadAsync(assetId.ToString(), assetVersion, null, attachmentStream);
                        });

                        job.HandledAssets++;
                    }
                    else
                    {
                        await writer.WriteEventAsync(eventData);
                    }

                    job.HandledEvents++;

                    var now = clock.GetCurrentInstant();

                    // Persist progress at most once per UpdateDuration.
                    if ((now - lastTimestamp) >= UpdateDuration)
                    {
                        lastTimestamp = now;

                        await WriteAsync();
                    }
                }, SquidexHeaders.AppId, appId.ToString(), null, currentTask.Token);
            }

            // Rewind so the upload reads the archive from the beginning.
            stream.Position = 0;

            currentTask.Token.ThrowIfCancellationRequested();

            await assetStore.UploadAsync(job.Id.ToString(), 0, null, stream, currentTask.Token);
        }
    }
    catch (Exception ex)
    {
        log.LogError(ex, w => w
            .WriteProperty("action", "makeBackup")
            .WriteProperty("status", "failed")
            .WriteProperty("backupId", job.Id.ToString()));

        job.IsFailed = true;
    }
    finally
    {
        // Remove the temporary local archive regardless of outcome.
        await CleanupArchiveAsync(job);

        job.Stopped = clock.GetCurrentInstant();

        await WriteAsync();

        currentTask = null;
        currentJob = null;
    }
}
// Starts a new backup: streams all app events into an event archive, embedding
// asset binaries for asset events, then uploads the archive to the asset store.
public async Task StartNewAsync()
{
    if (currentTask != null)
    {
        throw new DomainException("Another backup process is already running.");
    }

    if (state.Jobs.Count >= MaxBackups)
    {
        throw new DomainException($"You cannot have more than {MaxBackups} backups.");
    }

    var job = new BackupStateJob { Id = Guid.NewGuid(), Started = clock.GetCurrentInstant() };

    currentTask = new CancellationTokenSource();
    currentJob = job;

    state.Jobs.Add(job);

    await WriteAsync();

    try
    {
        using (var stream = await backupArchiveLocation.OpenStreamAsync(job.Id))
        {
            using (var writer = new EventStreamWriter(stream))
            {
                await eventStore.QueryAsync(async @event =>
                {
                    var eventData = @event.Data;

                    // Asset events carry a binary that must be downloaded and
                    // written into the archive next to the event itself.
                    if (eventData.Type == nameof(AssetCreated) || eventData.Type == nameof(AssetUpdated))
                    {
                        var parsedEvent = eventDataFormatter.Parse(eventData);

                        var assetVersion = 0L;
                        var assetId = Guid.Empty;

                        if (parsedEvent.Payload is AssetCreated assetCreated)
                        {
                            assetId = assetCreated.AssetId;

                            assetVersion = assetCreated.FileVersion;
                        }

                        // NOTE(review): 'asetUpdated' looks like a typo for 'assetUpdated'.
                        if (parsedEvent.Payload is AssetUpdated asetUpdated)
                        {
                            assetId = asetUpdated.AssetId;

                            assetVersion = asetUpdated.FileVersion;
                        }

                        await writer.WriteEventAsync(eventData, async attachmentStream =>
                        {
                            await assetStore.DownloadAsync(assetId.ToString(), assetVersion, null, attachmentStream);
                        });
                    }
                    else
                    {
                        await writer.WriteEventAsync(eventData);
                    }
                }, "AppId", appId, null, currentTask.Token);
            }

            // Rewind so the upload reads the archive from the beginning.
            stream.Position = 0;

            currentTask.Token.ThrowIfCancellationRequested();

            await assetStore.UploadAsync(job.Id.ToString(), 0, null, stream);

            currentTask.Token.ThrowIfCancellationRequested();
        }
    }
    catch
    {
        // NOTE(review): the exception is swallowed without logging; the job is
        // only marked as failed — consider logging the cause.
        job.Failed = true;
    }
    finally
    {
        job.Stopped = clock.GetCurrentInstant();

        await WriteAsync();

        currentTask = null;
        currentJob = null;
    }
}
// Runs a new backup: streams all app events into the backup archive, lets each
// handler contribute its data, then uploads the finished archive. Job status
// and timestamps are always persisted in the finally block.
public async Task RunAsync()
{
    if (currentTask != null)
    {
        throw new DomainException("Another backup process is already running.");
    }

    if (state.Jobs.Count >= MaxBackups)
    {
        throw new DomainException($"You cannot have more than {MaxBackups} backups.");
    }

    var job = new BackupStateJob { Id = Guid.NewGuid(), Started = clock.GetCurrentInstant(), Status = JobStatus.Started };

    currentTask = new CancellationTokenSource();
    currentJob = job;

    // Timestamp of the last progress write; used to throttle state updates.
    var lastTimestamp = job.Started;

    state.Jobs.Insert(0, job);

    await WriteAsync();

    try
    {
        using (var stream = await backupArchiveLocation.OpenStreamAsync(job.Id))
        {
            using (var writer = new BackupWriter(serializer, stream, true))
            {
                await eventStore.QueryAsync(async storedEvent =>
                {
                    var @event = eventDataFormatter.Parse(storedEvent.Data);

                    writer.WriteEvent(storedEvent);

                    foreach (var handler in handlers)
                    {
                        await handler.BackupEventAsync(@event, appId, writer);
                    }

                    job.HandledEvents = writer.WrittenEvents;
                    job.HandledAssets = writer.WrittenAttachments;

                    // Persist progress at most once per update interval.
                    lastTimestamp = await WritePeriodically(lastTimestamp);
                }, SquidexHeaders.AppId, appId.ToString(), null, currentTask.Token);

                foreach (var handler in handlers)
                {
                    await handler.BackupAsync(appId, writer);
                }

                foreach (var handler in handlers)
                {
                    await handler.CompleteBackupAsync(appId, writer);
                }
            }

            // Rewind so the upload reads the archive from the beginning.
            stream.Position = 0;

            currentTask.Token.ThrowIfCancellationRequested();

            await assetStore.UploadAsync(job.Id.ToString(), 0, null, stream, currentTask.Token);
        }

        job.Status = JobStatus.Completed;
    }
    catch (Exception ex)
    {
        log.LogError(ex, w => w
            .WriteProperty("action", "makeBackup")
            .WriteProperty("status", "failed")
            .WriteProperty("backupId", job.Id.ToString()));

        job.Status = JobStatus.Failed;
    }
    finally
    {
        // Remove the temporary local archive regardless of outcome.
        await Safe.DeleteAsync(backupArchiveLocation, job.Id, log);

        job.Stopped = clock.GetCurrentInstant();

        await WriteAsync();

        currentTask = null;
        currentJob = null;
    }
}
// Runs a backup job: streams all app events into the backup archive, lets each
// handler contribute its data, then uploads the finished archive. Job status
// and timestamps are always persisted in the finally block.
private async Task ProcessAsync(BackupStateJob job, CancellationToken ct)
{
    var jobId = job.Id.ToString();

    var handlers = CreateHandlers();

    // Timestamp of the last progress write; used to throttle state updates.
    var lastTimestamp = job.Started;

    try
    {
        using (var stream = await backupArchiveLocation.OpenStreamAsync(jobId))
        {
            using (var writer = new BackupWriter(serializer, stream, true))
            {
                await eventStore.QueryAsync(async storedEvent =>
                {
                    var @event = eventDataFormatter.Parse(storedEvent.Data);

                    writer.WriteEvent(storedEvent);

                    foreach (var handler in handlers)
                    {
                        await handler.BackupEventAsync(@event, Key, writer);
                    }

                    job.HandledEvents = writer.WrittenEvents;
                    job.HandledAssets = writer.WrittenAttachments;

                    // Persist progress at most once per update interval.
                    lastTimestamp = await WritePeriodically(lastTimestamp);
                }, SquidexHeaders.AppId, Key.ToString(), null, ct);

                foreach (var handler in handlers)
                {
                    await handler.BackupAsync(Key, writer);
                }

                foreach (var handler in handlers)
                {
                    await handler.CompleteBackupAsync(Key, writer);
                }
            }

            // Rewind so the upload reads the archive from the beginning.
            stream.Position = 0;

            ct.ThrowIfCancellationRequested();

            await assetStore.UploadAsync(jobId, 0, null, stream, false, ct);
        }

        job.Status = JobStatus.Completed;
    }
    catch (Exception ex)
    {
        log.LogError(ex, jobId, (ctx, w) => w
            .WriteProperty("action", "makeBackup")
            .WriteProperty("status", "failed")
            .WriteProperty("backupId", ctx));

        job.Status = JobStatus.Failed;
    }
    finally
    {
        // Remove the temporary local archive regardless of outcome.
        await Safe.DeleteAsync(backupArchiveLocation, jobId, log);

        job.Stopped = clock.GetCurrentInstant();

        await state.WriteAsync();

        currentTask = null;
        currentJob = null;
    }
}