/// <summary>
/// Reads events up to the last one available. Pre-loads the projection from
/// its cache, if available, to reduce the necessary work.
/// </summary>
/// <param name="cancel"> Cancels the initialization; cancellation is rethrown,
/// not treated as a cache failure. </param>
public async Task InitializeAsync(CancellationToken cancel = default)
{
    var sw = Stopwatch.StartNew();
    try
    {
        // Load projection and discard events before that.
        _log?.Info($"{sw.Elapsed:mm':'ss'.'ff} [ES init] loading projections.");

        await _projection.TryLoadAsync(cancel).ConfigureAwait(false);

        var catchUp = _projection.Sequence + 1;

        // Catch-up must start no later than the earliest sequence at which a
        // per-commit callback wants to begin receiving events.
        if (_onEachCommitted != null)
        {
            foreach (var e in _onEachCommitted)
            {
                if (e.Start < catchUp)
                {
                    catchUp = e.Start;
                }
            }
        }

        _log?.Info($"{sw.Elapsed:mm':'ss'.'ff} [ES init] advancing stream to seq {catchUp}.");

        await Stream.DiscardUpTo(catchUp, cancel).ConfigureAwait(false);

        if (Stream.Sequence < catchUp)
        {
            _log?.Warning(
                $"{sw.Elapsed:mm':'ss'.'ff} [ES init] invalid seq {catchUp} > {Stream.Sequence}, resetting everything.");

            // Cache is apparently beyond the available sequence. Could happen in
            // development environments with non-persistent events but persistent
            // caches. Treat cache as invalid and start from the beginning.
            Stream.Reset();
            _projection.Reset();
        }
    }
    catch (OperationCanceledException)
    {
        // Cancellation is not a cache failure: do not wipe the stream or
        // projection state, and do not fall through to a doomed catch-up.
        throw;
    }
    catch (Exception e)
    {
        _log?.Warning(
            $"{sw.Elapsed:mm':'ss'.'ff} [ES init] error while reading cache.", e);

        // Something went wrong when reading the cache. Stop.
        Stream.Reset();
        _projection.Reset();
    }

    // Start reading everything
    _log?.Info($"{sw.Elapsed:mm':'ss'.'ff} [ES init] catching up with stream.");

    await CatchUpAsync(cancel).ConfigureAwait(false);

    _log?.Info($"{sw.Elapsed:mm':'ss'.'ff} [ES init] DONE !");
}
/// <summary>
/// Catch up with the stream (updating the state) until there are no new
/// events available.
/// </summary>
/// <param name="cancel"> Cancels the catch-up mid-way. </param>
public async Task CatchUpAsync(CancellationToken cancel = default)
{
    Func<bool> finishFetch;

    // Local variable, to avoid reaching the limit when not doing the
    // initial catch-up.
    var eventsSinceLastCacheLoad = 0u;

    do
    {
        var fetchTask = Stream.BackgroundFetchAsync(cancel);

        // We have started fetching the next batch of events in
        // the background, so we might as well start processing
        // those we already have. This pattern is optimized for
        // when fetching events takes longer than processing them,
        // and remains safe (i.e. no runaway memory usage) when
        // the reverse is true.
        eventsSinceLastCacheLoad += CatchUpLocal();

        // Maybe we have reached the event count limit before our
        // save/load cycle ?
        if (eventsSinceLastCacheLoad >= EventsBetweenCacheSaves)
        {
            eventsSinceLastCacheLoad = 0;
            var sw = Stopwatch.StartNew();
            if (await _projection.TrySaveAsync(cancel).ConfigureAwait(false))
            {
                // Reset first, to release any used memory.
                _projection.Reset();

                await _projection.TryLoadAsync(cancel).ConfigureAwait(false);

                // The reloaded cache must land exactly where the stream is,
                // otherwise events would be skipped or double-applied.
                if (_projection.Sequence != Stream.Sequence)
                {
                    throw new InvalidOperationException(
                        "Projection Save/Load cycle failed to restore sequence.");
                }

                // Null-conditional: _log is optional everywhere else in this type.
                _log?.Info($"[ES read] cache save/load cycle in {sw.Elapsed} at seq {_projection.Sequence}.");
            }
        }

        finishFetch = await fetchTask.ConfigureAwait(false);

    } while (finishFetch());

    // We reach this point if 1° all events cached in the stream have
    // been processed and 2° the fetch operation returned no new events
    NotifyRefresh();
}
public void multi_apply_reset()
{
    // Group the two mocked sub-projections into a single reified projection.
    var reified = new ReifiedProjectionGroup<int, State>(new IProjection<int>[]
    {
        MockInteger().Object,
        MockString().Object,
    });

    // Apply one event so there is state to throw away.
    reified.Apply(1U, 10);
    Assert.IsNotNull(reified.Current);

    reified.Reset();

    // After a reset: sequence back to zero, and both sub-states back to
    // their initial values.
    Assert.AreEqual(0U, reified.Sequence);
    Assert.AreEqual(0, reified.Current.I.Value);
    Assert.AreEqual("I", reified.Current.S);
}
/// <summary> Reset the wrapper. Used when it is necessary to try again. </summary>
public void Reset()
{
    // Drop the in-memory projection state back to its initial value, then
    // rewind the event stream so the next catch-up replays from scratch.
    // NOTE(review): resets projection before stream; the catch path in
    // InitializeAsync uses the opposite order — presumably interchangeable,
    // but confirm before reordering.
    _projection.Reset();
    Stream.Reset();
}