/// <summary>
/// Builds the reader pipe: a single-concurrency pipeline that loads the last
/// checkpoint, streams events from <paramref name="reader"/> and forwards each
/// one to <paramref name="send"/>, retrying transient failures incrementally.
/// </summary>
/// <param name="reader">Source of events to replicate.</param>
/// <param name="checkpointStore">Provides the position to resume reading from.</param>
/// <param name="send">Callback invoked for every event read.</param>
public ReaderPipe(IEventReader reader, ICheckpointStore checkpointStore, Func<PrepareContext, ValueTask> send) {
    ILog logger = LogProvider.GetCurrentClassLogger();

    _pipe = Pipe.New<ReaderContext>(
        cfg => {
            // One reader at a time; retries back off incrementally (0ms, 100ms, 200ms, ...).
            cfg.UseConcurrencyLimit(1);

            cfg.UseRetry(
                retry => {
                    retry.Incremental(100, TimeSpan.Zero, TimeSpan.FromMilliseconds(100));
                    retry.ConnectRetryObserver(new LoggingRetryObserver());
                }
            );
            cfg.UseLog();
            cfg.UseExecuteAsync(ReadLoop);
        }
    );

    async Task ReadLoop(ReaderContext ctx) {
        try {
            var checkpoint = await checkpointStore.LoadCheckpoint(ctx.CancellationToken).ConfigureAwait(false);
            logger.Info("Reading from {Position}", checkpoint);

            await reader.ReadEvents(
                checkpoint,
                async readEvent => {
                    ReplicationMetrics.ReadingPosition.Set(readEvent.Position.EventPosition);
                    await send(new PrepareContext(readEvent, ctx.CancellationToken)).ConfigureAwait(false);
                },
                ctx.CancellationToken
            ).ConfigureAwait(false);
        }
        catch (OperationCanceledException) {
            // Cancellation is the normal shutdown path — not an error.
        }
        finally {
            logger.Info("Reader stopped");
        }
    }
}
/// <summary>
/// Rebuilds aggregate state of type <typeparamref name="T"/> by reading the
/// stream in pages of 500 and folding each event payload into the state.
/// </summary>
/// <param name="stream">Stream to read.</param>
/// <param name="cancellationToken">Cancels the read.</param>
/// <returns>The fully folded state.</returns>
public async Task<T> LoadState<T, TId>(StreamName stream, CancellationToken cancellationToken)
    where T : AggregateState<T, TId>, new()
    where TId : AggregateId {
    const int pageSize = 500;

    var state        = new T();
    var readPosition = StreamReadPosition.Start;

    while (true) {
        var page = await _eventReader
            .ReadEvents(stream, readPosition, pageSize, cancellationToken)
            .NoContext();

        // Fold each non-null payload; When returns the next state instance.
        foreach (var streamEvent in page) {
            var payload = streamEvent.Payload;
            if (payload != null) state = state.When(payload);
        }

        // A short page means we reached the end of the stream.
        if (page.Length < pageSize) break;

        readPosition = new StreamReadPosition(readPosition.Value + page.Length);
    }

    return state;
}
/// <summary>
/// Reads log events, optionally filtered by the configured dynamic-LINQ filter
/// string and capped at the configured maximum message count.
/// </summary>
/// <returns>The materialized list of matching events (possibly empty).</returns>
/// <exception cref="ArgumentException">The filter string could not be parsed.</exception>
public IEnumerable<LogEvent> ReadEvents() {
    var unfilteredEvents = _reader.ReadEvents();

    IEnumerable<LogEvent> query;

    if (String.IsNullOrEmpty(_config.FilterString)) {
        query = unfilteredEvents; // no filter defined
    }
    else {
        try {
            query = unfilteredEvents.AsQueryable().Where(_config.FilterString);
        }
        catch (ParseException pe) {
            // add some extra info to the error message
            throw new ArgumentException(String.Format("Could not parse filter string '{0}': {1}", _config.FilterString, pe), pe);
        }
    }

    if (_config.UseMaxMessages) {
        query = query.Take(_config.MaxMessages);
    }

    // Materialize exactly once. The original counted the lazy query and then
    // returned it, so the source was read and filtered twice (once for the
    // count, once when the caller enumerated).
    var result = query.ToList();

    if (result.Count > 0) {
        Trace.WriteLine("Events found: " + result.Count);
    }
    else {
        Trace.WriteLine("No events were found for FRENDS Radon report.");
    }

    return result;
}
/// <summary>
/// Reads an entire stream page by page (500 events per page) and returns all
/// of its events as one array.
/// </summary>
/// <param name="eventReader">Reader to pull events from.</param>
/// <param name="streamName">Stream to read.</param>
/// <param name="start">Position to start reading from.</param>
/// <param name="failIfNotFound">
/// When false, a missing stream yields an empty array instead of throwing.
/// </param>
/// <param name="cancellationToken">Cancels the read.</param>
public static async Task<StreamEvent[]> ReadStream(
    this IEventReader eventReader,
    StreamName streamName,
    StreamReadPosition start,
    bool failIfNotFound,
    CancellationToken cancellationToken
) {
    const int pageSize = 500;

    var collected = new List<StreamEvent>();
    var next      = start;

    try {
        bool morePages;

        do {
            var page = await eventReader
                .ReadEvents(streamName, next, pageSize, cancellationToken)
                .NoContext();

            collected.AddRange(page);

            // A full page means there may be more to read.
            morePages = page.Length == pageSize;
            next      = new StreamReadPosition(next.Value + page.Length);
        } while (morePages);
    }
    catch (StreamNotFound) when (!failIfNotFound) {
        return Array.Empty<StreamEvent>();
    }

    return collected.ToArray();
}
/// <summary>
/// Wraps the inner reader's events between a <c>BeganRoadNetworkImport</c> and
/// a <c>CompletedRoadNetworkImport</c> marker event, each stamped with the
/// current instant.
/// </summary>
/// <param name="connection">Open connection handed to the inner reader.</param>
/// <exception cref="ArgumentNullException"><paramref name="connection"/> is null.</exception>
public IEnumerable<StreamEvent> ReadEvents(SqlConnection connection) {
    // Validate eagerly. The original performed this check inside the iterator
    // body, so the exception was deferred until the sequence was first
    // enumerated rather than thrown at the call site.
    if (connection == null) {
        throw new ArgumentNullException(nameof(connection));
    }

    return ReadEventsCore(connection);
}

// Iterator core; only reached with a validated connection.
private IEnumerable<StreamEvent> ReadEventsCore(SqlConnection connection) {
    yield return new StreamEvent(RoadNetworks.Stream, new BeganRoadNetworkImport {
        When = InstantPattern.ExtendedIso.Format(_clock.GetCurrentInstant())
    });

    foreach (var @event in _reader.ReadEvents(connection)) {
        yield return @event;
    }

    yield return new StreamEvent(RoadNetworks.Stream, new CompletedRoadNetworkImport {
        When = InstantPattern.ExtendedIso.Format(_clock.GetCurrentInstant())
    });
}
/// <summary>
/// Reads log events, stopping at the first event that was already reported:
/// events strictly newer than the stored identification pass through, and an
/// event with the same timestamp passes only if its hash differs.
/// </summary>
/// <returns>
/// All events when nothing was reported before; otherwise the leading run of
/// not-yet-reported events.
/// </returns>
public IEnumerable<LogEvent> ReadEvents() {
    var lastReported = _identificationStore.GetAlreadyReportedEventIdentification();
    var events       = _eventReader.ReadEvents();

    // Nothing reported yet — everything is new.
    if (lastReported == null) {
        return events;
    }

    return events.TakeWhile(IsNotYetReported);

    bool IsNotYetReported(LogEvent e) {
        var generatedUtc = e.TimeGenerated.ToUniversalTime();

        // Strictly newer than the last reported event.
        if (generatedUtc > lastReported.TimeStampUtc) {
            return true;
        }

        // Same age as the last reported event: only pass if the content hash
        // differs (i.e. it is a different event at the same timestamp).
        if (generatedUtc == lastReported.TimeStampUtc) {
            return HashBuilder.BuildEventIdentification(e).Hash != lastReported.Hash;
        }

        // Older — stop returning events.
        return false;
    }
}
/// <summary>
/// Logging decorator around the inner reader: forwards every event while
/// reporting progress every <c>_threshold</c> events and a total on completion.
/// </summary>
/// <param name="connection">Open connection handed to the inner reader.</param>
/// <exception cref="ArgumentNullException"><paramref name="connection"/> is null.</exception>
public IEnumerable<StreamEvent> ReadEvents(SqlConnection connection) {
    // Validate eagerly. The original performed this check inside the iterator
    // body, so the exception was deferred until the sequence was first
    // enumerated rather than thrown at the call site.
    if (connection == null) {
        throw new ArgumentNullException(nameof(connection));
    }

    return ReadEventsWithLogging(connection);
}

// Iterator core; only reached with a validated connection.
private IEnumerable<StreamEvent> ReadEventsWithLogging(SqlConnection connection) {
    var watch = Stopwatch.StartNew();
    _logger.LogInformation("Reading of {0} started ...", _name);

    var readCount = 0;

    foreach (var @event in _inner.ReadEvents(connection)) {
        readCount++;

        // Periodic progress message so long imports show signs of life.
        if (readCount % _threshold == 0) {
            _logger.LogInformation("Read {0} {1} within {2}ms so far ...", readCount, _name, watch.ElapsedMilliseconds);
        }

        yield return @event;
    }

    _logger.LogInformation("Reading {0} {1} took {2}ms.", readCount, _name, watch.ElapsedMilliseconds);
}
/// <summary>
/// Reads all events from the underlying reader and materializes them as a list.
/// </summary>
/// <returns>The materialized list of log events.</returns>
public IList<LogEvent> GetLogEvents() => _eventReader.ReadEvents().ToList();
/// <summary>
/// Replicates events from <paramref name="reader"/> to <paramref name="writer"/>
/// through a bounded channel pipeline, starting at the last stored checkpoint.
/// Events may be filtered and transformed on the way; written positions are
/// batched and persisted back to the checkpoint store.
/// </summary>
/// <param name="reader">Source of events.</param>
/// <param name="writer">Destination for (possibly transformed) events.</param>
/// <param name="checkpointStore">Loads the start position and stores progress.</param>
/// <param name="cancellationToken">Caller-side cancellation.</param>
/// <param name="filter">Optional predicate; events failing it are dropped. May be null.</param>
/// <param name="transform">Optional event transformation. May be null.</param>
public async Task Replicate(
    IEventReader reader,
    IEventWriter writer,
    ICheckpointStore checkpointStore,
    CancellationToken cancellationToken,
    Func<EventRead, bool> filter,
    Func<EventRead, ValueTask<EventWrite>> transform
) {
    // Internal source lets a pipeline failure (see Try below) abort the whole
    // run; linked with the caller's token so external cancellation also stops it.
    var cts       = new CancellationTokenSource();
    var linkedCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken, cts.Token);

    // NOTE(review): the checkpoint load is not passed a cancellation token — confirm
    // the ICheckpointStore overload used here is intentional.
    var start = await checkpointStore.LoadCheckpoint();

    // Writes are retried forever on any error except cancellation.
    var retryPolicy = Policy
        .Handle<Exception>(ex => !(ex is OperationCanceledException))
        .RetryForeverAsync(ex => Log.Warn(ex, "Writer error: {Error}, retrying", ex.Message));

    // Bounded channel (size Capacity) fed by the reader stream.
    var channel = Channel.CreateBounded<EventRead>(Capacity)
        .Source(reader.ReadEvents(start, linkedCts.Token));

    if (filter != null) {
        channel = channel.Filter(x => Try(x, filter));
    }

    // Choose the transform stage: the caller's transform wrapped in error
    // handling, or the default pass-through when none was supplied.
    Func<EventRead, ValueTask<EventWrite>> transformFunction;

    if (transform != null) {
        transformFunction = TryTransform;
    }
    else {
        transformFunction = DefaultTransform;
    }

    // Pipeline: transform (5 concurrent) -> write -> batch positions (up to 1024)
    // before each checkpoint store call.
    var resultChannel = channel
        .PipeAsync(5, transformFunction, Capacity, false, linkedCts.Token)
        .PipeAsync(WriteEvent, Capacity, true, linkedCts.Token)
        .Batch(1024, true, true);

    var lastPosition = new Position();

    try {
        await resultChannel.ReadUntilCancelledAsync(linkedCts.Token, StoreCheckpoint, true);
    }
    catch (Exception e) {
        // The run ends here; the last checkpoint already stored marks the resume point.
        Log.Fatal(e, "Unable to proceed: {Error}", e.Message);
    }
    finally {
        Log.Info("Last recorded position: {Position}", lastPosition);
    }

    // Writes one event (with retry) and reports its source position downstream.
    async ValueTask<Position> WriteEvent(EventWrite write) {
        await retryPolicy.ExecuteAsync(() => writer.WriteEvent(write));
        return(write.SourcePosition);
    }

    // Runs a caller-supplied stage; on failure, logs and cancels the whole
    // pipeline before rethrowing.
    T Try<T>(EventRead evt, Func<EventRead, T> func) {
        try {
            return(func(evt));
        }
        catch (Exception e) {
            Log.Error(e, "Error in the pipeline: {Error}", e.Message);
            cts.Cancel();
            throw;
        }
    }

    async ValueTask<EventWrite> TryTransform(EventRead evt) => await Try(evt, transform);

    // Persists the latest position of each batch; lastPosition is captured for
    // the final log line above.
    ValueTask StoreCheckpoint(List<Position> positions) {
        lastPosition = positions.Last();

        return(checkpointStore.StoreCheckpoint(lastPosition));
    }
}