/// <summary>
/// Builds the Azure Table query selecting all <c>EventTagEntry</c> rows for the
/// replay's tag whose row keys fall inside the requested offset window.
/// </summary>
/// <param name="replay">Replay request supplying the tag and the FromOffset/ToOffset bounds.</param>
/// <returns>A <c>TableQuery</c> combining the partition-key and row-key range filters.</returns>
private static TableQuery<EventTagEntry> GenerateTaggedMessageQuery(ReplayTaggedMessages replay)
{
    // All rows for one tag share a partition derived from the tag name.
    var partitionFilter = TableQuery.GenerateFilterCondition(
        "PartitionKey",
        QueryComparisons.Equal,
        EventTagEntry.GetPartitionKey(replay.Tag));

    // Lower bound uses GreaterThan with the incremented delimiter appended —
    // presumably so the row keyed exactly at FromOffset is excluded; delimiter
    // semantics are defined by EventTagEntry (verify there).
    var lowerBound = TableQuery.GenerateFilterCondition(
        "RowKey",
        QueryComparisons.GreaterThan,
        $"{replay.FromOffset.ToJournalRowKey()}{EventTagEntry.AsciiIncrementedDelimiter}");

    // Upper bound is inclusive at ToOffset.
    var upperBound = TableQuery.GenerateFilterCondition(
        "RowKey",
        QueryComparisons.LessThanOrEqual,
        $"{replay.ToOffset.ToJournalRowKey()}{EventTagEntry.Delimiter}");

    var rowKeyWindow = TableQuery.CombineFilters(lowerBound, TableOperators.And, upperBound);
    var combined = TableQuery.CombineFilters(partitionFilter, TableOperators.And, rowKeyWindow);

    return new TableQuery<EventTagEntry>().Where(combined);
}
/// <summary>
/// Replays all events with given tag within provided boundaries from current database.
/// </summary>
/// <param name="replay">Replay request: tag, offset window, maximum event count and reply target.</param>
/// <returns>The highest <c>UtcTicks</c> ordering value among the replayed events, or 0 when none matched.</returns>
private async Task<long> ReplayTaggedMessagesAsync(ReplayTaggedMessages replay)
{
    var query = GenerateTaggedMessageQuery(replay);

    // While we can specify the TakeCount, the CloudTable client does
    // not really respect this fact and will keep pulling results.
    query.TakeCount = replay.Max > int.MaxValue ? int.MaxValue : (int)replay.Max;

    // In order to actually break at the limit we ask for we have to
    // keep a separate counter and track it ourselves.
    var counter = 0;

    TableQuerySegment<EventTagEntry> result = null;
    var maxOrderingId = 0L;

    do
    {
        result = await Table.ExecuteQuerySegmentedAsync(query, result?.ContinuationToken);

        foreach (var entry in result.Results.OrderBy(x => x.UtcTicks))
        {
            // BUGFIX: enforce the cap per entry. Previously the check ran only
            // after a whole segment had been processed, so a single segment
            // larger than replay.Max could replay more events than requested.
            if (counter >= replay.Max)
                return maxOrderingId;

            var deserialized = _serialization.PersistentFromBytes(entry.Payload);
            var persistent = new Persistent(
                deserialized.Payload,
                deserialized.SequenceNr,
                deserialized.PersistenceId,
                deserialized.Manifest,
                deserialized.IsDeleted,
                ActorRefs.NoSender,
                deserialized.WriterGuid,
                timestamp: entry.UtcTicks);

            foreach (var adapted in AdaptFromJournal(persistent))
            {
                _log.Debug("Sending replayed message: persistenceId:{0} - sequenceNr:{1} - event:{2}",
                    deserialized.PersistenceId, deserialized.SequenceNr, deserialized.Payload);
                replay.ReplyTo.Tell(new ReplayedTaggedMessage(adapted, replay.Tag, entry.UtcTicks),
                    ActorRefs.NoSender);
                counter++;
            }

            maxOrderingId = Math.Max(maxOrderingId, entry.UtcTicks);
        }

        if (counter >= replay.Max)
        {
            break;
        }
    } while (result.ContinuationToken != null);

    return maxOrderingId;
}
/// <summary>
/// Registers a live tagged subscription for the sender: every matching event is
/// adapted and delivered as a <c>ReplayedTaggedMessage</c> via the supplied factory.
/// </summary>
/// <param name="msg">Replay request carrying the tag, starting offset and maximum count.</param>
private void StartTaggedSubscription(ReplayTaggedMessages msg)
{
    // FromOffset == 0 is mapped to null here — presumably the subscription API
    // treats null as "start from the beginning"; confirm against _subscriptions.
    long? fromOffset;
    if (msg.FromOffset == 0)
        fromOffset = null;
    else
        fromOffset = msg.FromOffset;

    _subscriptions.Subscribe(
        Sender,
        msg.Tag,
        fromOffset,
        (int)msg.Max,
        @event => new ReplayedTaggedMessage(
            _eventAdapter.Adapt(@event),
            msg.Tag,
            @event.Link?.EventNumber ?? @event.OriginalEventNumber));
}
/// <summary>
/// Replays all events with given tag within provided boundaries from current database.
/// </summary>
/// <param name="replay">Replay request: tag, offset window, maximum event count and reply target.</param>
/// <returns>The ordering value of the last replayed entry, or 0 when nothing was replayed.</returns>
private async Task<long> ReplayTaggedMessagesAsync(ReplayTaggedMessages replay)
{
    // Limit allows only integer
    var limitValue = replay.Max >= int.MaxValue ? int.MaxValue : (int)replay.Max;
    var fromSequenceNr = replay.FromOffset;
    var toSequenceNr = replay.ToOffset;
    var tag = replay.Tag;

    // Do not replay messages if limit equal zero
    if (limitValue == 0)
    {
        return (0);
    }

    // Filter: entries carrying the tag, with ordering in (fromSequenceNr, toSequenceNr].
    // Lower bound is exclusive (Gt); upper bound inclusive (Lte) and only applied
    // when the caller actually bounded the replay.
    var builder = Builders<JournalEntry>.Filter;
    var filter = builder.AnyEq(x => x.Tags, tag);
    if (fromSequenceNr > 0)
    {
        filter &= builder.Gt(x => x.Ordering, new BsonTimestamp(fromSequenceNr));
    }
    if (toSequenceNr != long.MaxValue)
    {
        filter &= builder.Lte(x => x.Ordering, new BsonTimestamp(toSequenceNr));
    }

    var sort = Builders<JournalEntry>.Sort.Ascending(x => x.Ordering);

    // NOTE(review): Limit() caps how many documents this single query returns, so
    // maxOrderingId ends up being the ordering of the last entry within that page,
    // not necessarily the overall max for the tag. This pattern was at the root of
    // Akka.Persistence.MongoDB issue #80 — verify whether callers depend on the
    // returned value being the true maximum.
    long maxOrderingId = 0;
    await _journalCollection.Value
        .Find(filter)
        .Sort(sort)
        .Limit(limitValue)
        .ForEachAsync(entry =>
        {
            // NOTE(review): writerGuid is passed as null and no timestamp is set,
            // unlike sibling implementations — confirm this is intentional.
            var persistent = new Persistent(entry.Payload, entry.SequenceNr, entry.PersistenceId,
                entry.Manifest, entry.IsDeleted, ActorRefs.NoSender, null);

            foreach (var adapted in AdaptFromJournal(persistent))
            {
                replay.ReplyTo.Tell(new ReplayedTaggedMessage(adapted, tag, entry.Ordering.Value),
                    ActorRefs.NoSender);
            }

            // Entries arrive in ascending Ordering, so the last assignment wins.
            maxOrderingId = entry.Ordering.Value;
        });

    return (maxOrderingId);
}
/// <summary>
/// Streams every journal event carrying the requested tag within the offset window
/// back to the requester, then reports <c>RecoverySuccess</c> with the highest
/// sequence number seen, or <c>ReplayMessagesFailure</c> on error.
/// </summary>
/// <param name="req">Replay request: tag, offset window, maximum count and reply target.</param>
/// <param name="command">Pre-created Oracle command to (re)configure and execute.</param>
protected override async Task HandleReplayTaggedMessages(ReplayTaggedMessages req, OracleCommand command)
{
    var replyTo = req.ReplyTo;

    try
    {
        var tag = req.Tag;
        // Never request more rows than the offset window or the caller's maximum allows.
        var take = Math.Min(req.ToOffset - req.FromOffset, req.Max);

        command.CommandText = ByTagSql;
        command.Parameters.Clear();
        AddParameter(command, ":Ordering", OracleDbType.Int64, req.FromOffset);
        // Tags are stored as a ';'-separated list, hence the wildcard match.
        AddParameter(command, ":Tag", OracleDbType.NVarchar2, $"%;{tag};%");
        AddParameter(command, ":Take", OracleDbType.Int64, take);

        var highestSequenceNr = 0L;
        using (var reader = await command.ExecuteReaderAsync())
        {
            while (await reader.ReadAsync())
            {
                var persistent = ReadEvent(reader);
                var ordering = reader.GetInt64(OrderingIndex);

                if (persistent.SequenceNr > highestSequenceNr)
                    highestSequenceNr = persistent.SequenceNr;

                foreach (var adapted in AdaptFromJournal(persistent))
                    replyTo.Tell(new ReplayedTaggedMessage(adapted, tag, ordering), ActorRefs.NoSender);
            }
        }

        replyTo.Tell(new RecoverySuccess(highestSequenceNr));
    }
    catch (Exception ex)
    {
        replyTo.Tell(new ReplayMessagesFailure(ex));
    }
}
/// <summary>
/// Replays all events with given tag within provided boundaries from current database.
/// </summary>
/// <param name="replay">Replay request: tag, offset window, maximum event count and reply target.</param>
/// <returns>The highest ordering id visible for the tag within the window, or 0 when nothing matched.</returns>
private async Task<long> ReplayTaggedMessagesAsync(ReplayTaggedMessages replay)
{
    /*
     * NOTE: limit is used like a pagination value, not a cap on the amount
     * of data returned by a query. This was at the root of https://github.com/akkadotnet/Akka.Persistence.MongoDB/issues/80
     */
    // Limit allows only integer
    var limitValue = replay.Max >= int.MaxValue ? int.MaxValue : (int)replay.Max;
    var fromSequenceNr = replay.FromOffset;
    var toSequenceNr = replay.ToOffset;
    var tag = replay.Tag;

    // Tag filter with ordering in (fromSequenceNr, toSequenceNr]; lower bound
    // is exclusive (Gt), upper bound inclusive (Lte) and only applied when the
    // caller actually bounded the replay.
    var builder = Builders<JournalEntry>.Filter;
    var seqNoFilter = builder.AnyEq(x => x.Tags, tag);
    if (fromSequenceNr > 0)
    {
        seqNoFilter &= builder.Gt(x => x.Ordering, new BsonTimestamp(fromSequenceNr));
    }
    if (toSequenceNr != long.MaxValue)
    {
        seqNoFilter &= builder.Lte(x => x.Ordering, new BsonTimestamp(toSequenceNr));
    }

    // Need to know what the highest seqNo of this query will be
    // and return that as part of the RecoverySuccess message
    var maxSeqNoEntry = await _journalCollection.Value.Find(seqNoFilter)
        .SortByDescending(x => x.Ordering)
        .Limit(1)
        .SingleOrDefaultAsync();

    if (maxSeqNoEntry == null)
    {
        return 0L; // recovered nothing
    }

    var maxOrderingId = maxSeqNoEntry.Ordering.Value;
    var toSeqNo = Math.Min(toSequenceNr, maxOrderingId);

    // Reuse the probe filter instead of rebuilding it, and ALWAYS bound the read
    // at toSeqNo. The original built an identical filter a second time and, when
    // ToOffset == long.MaxValue, applied no upper bound at all — so events
    // persisted between the probe above and the read below could be replayed
    // with an ordering beyond the maxOrderingId we report to the caller.
    var readFilter = seqNoFilter & builder.Lte(x => x.Ordering, new BsonTimestamp(toSeqNo));

    var sort = Builders<JournalEntry>.Sort.Ascending(x => x.Ordering);

    await _journalCollection.Value
        .Find(readFilter)
        .Sort(sort)
        .Limit(limitValue)
        .ForEachAsync(entry =>
        {
            var persistent = ToPersistenceRepresentation(entry, ActorRefs.NoSender);
            foreach (var adapted in AdaptFromJournal(persistent))
            {
                replay.ReplyTo.Tell(new ReplayedTaggedMessage(adapted, tag, entry.Ordering.Value),
                    ActorRefs.NoSender);
            }
        });

    return maxOrderingId;
}