/// <summary>
/// Persists <paramref name="snapshot"/> as an upsert, then re-reads the stream head and
/// re-syncs its SnapshotRevision / Unsnapshotted counters against the new snapshot.
/// </summary>
/// <param name="snapshot">The snapshot to persist; a null value is rejected.</param>
/// <returns>
/// true when the snapshot was stored; false when <paramref name="snapshot"/> is null
/// or persistence failed (the failure is logged, not rethrown).
/// </returns>
public virtual bool AddSnapshot(ISnapshot snapshot)
{
    if (snapshot == null)
    {
        return false;
    }

    Logger.Debug(Messages.AddingSnapshot, snapshot.StreamId, snapshot.BucketId, snapshot.StreamRevision);
    try
    {
        BsonDocument mongoSnapshot = snapshot.ToMongoSnapshot(_serializer);
        IMongoQuery query = Query.EQ(MongoShapshotFields.Id, mongoSnapshot[MongoShapshotFields.Id]);
        UpdateBuilder update = Update.Set(MongoShapshotFields.Payload, mongoSnapshot[MongoShapshotFields.Payload]);

        // Doing an upsert instead of an insert allows us to overwrite an existing snapshot and not get stuck with a
        // stream that needs to be snapshotted because the insert fails and the SnapshotRevision isn't being updated.
        PersistedSnapshots.Update(query, update, UpdateFlags.Upsert);

        // More commits could have been made between us deciding that a snapshot is required and writing it so just
        // resetting the Unsnapshotted count may be a little off. Adding snapshots should be a separate process so
        // this is a good chance to make sure the numbers are still in-sync - it only adds a 'read' after all ...
        BsonDocument streamHeadId = GetStreamHeadId(snapshot.BucketId, snapshot.StreamId);
        StreamHead streamHead = PersistedStreamHeads.FindOneById(streamHeadId).ToStreamHead();
        int unsnapshotted = streamHead.HeadRevision - snapshot.StreamRevision;
        PersistedStreamHeads.Update(
            Query.EQ(MongoStreamHeadFields.Id, streamHeadId),
            Update.Set(MongoStreamHeadFields.SnapshotRevision, snapshot.StreamRevision).Set(MongoStreamHeadFields.Unsnapshotted, unsnapshotted));

        return true;
    }
    catch (Exception ex)
    {
        // Fix: the failure used to be swallowed with no trace at all, which made snapshot
        // persistence problems invisible. Log it (matching the style used elsewhere in this
        // class) while keeping the original bool contract for callers.
        Logger.Warn(
            "Ignored Exception '{0}' when adding a snapshot for Bucket Id [{1}] StreamId[{2}].\n {3}",
            ex.GetType().Name, snapshot.BucketId, snapshot.StreamId, ex.ToString());
        return false;
    }
}
/// <summary>
/// Fire-and-forget upsert of the stream-head counters for the given stream: records the new
/// head revision and adds <paramref name="eventsCount"/> to the unsnapshotted tally on a
/// thread-pool thread. Transient failures are logged and ignored; the counters are re-synced
/// the next time a snapshot is added.
/// </summary>
/// <param name="bucketId">Bucket owning the stream.</param>
/// <param name="streamId">Stream whose head document is updated.</param>
/// <param name="streamRevision">New head revision to record.</param>
/// <param name="eventsCount">Number of events to add to the unsnapshotted count.</param>
private void UpdateStreamHeadAsync(string bucketId, string streamId, int streamRevision, int eventsCount)
{
    ThreadPool.QueueUserWorkItem(x =>
    {
        try
        {
            TryMongo(() =>
            {
                BsonDocument streamHeadId = GetStreamHeadId(bucketId, streamId);
                PersistedStreamHeads.Update(
                    Query.EQ(MongoStreamHeadFields.Id, streamHeadId),
                    Update
                        .Set(MongoStreamHeadFields.HeadRevision, streamRevision)
                        // Inc by zero ensures the SnapshotRevision field exists when the
                        // upsert inserts a brand-new stream-head document.
                        .Inc(MongoStreamHeadFields.SnapshotRevision, 0)
                        .Inc(MongoStreamHeadFields.Unsnapshotted, eventsCount),
                    UpdateFlags.Upsert);
            });
        }
        catch (OutOfMemoryException)
        {
            // Fix: the exception variable was declared but never used (compiler warning CS0168).
            // A bare rethrow keeps OOM fatal and preserves the original stack trace.
            throw;
        }
        catch (Exception ex)
        {
            // It is safe to ignore a transient exception updating the stream head.
            Logger.Warn("Ignored Exception '{0}' when upserting the stream head Bucket Id [{1}] StreamId[{2}].\n {3}", ex.GetType().Name, bucketId, streamId, ex.ToString());
        }
    },
    null);
}
/// <summary>
/// Queues a thread-pool work item that upserts the stream-head document for the given
/// stream: sets the new head revision and increments the unsnapshotted event count.
/// Fire-and-forget — the caller does not observe completion or failure.
/// </summary>
/// <param name="bucketId">Bucket owning the stream.</param>
/// <param name="streamId">Stream whose head document is updated.</param>
/// <param name="streamRevision">New head revision to record.</param>
/// <param name="eventsCount">Number of events to add to the unsnapshotted count.</param>
private void UpdateStreamHeadAsync(string bucketId, string streamId, int streamRevision, int eventsCount)
{
    WaitCallback work = state => TryMongo(() =>
    {
        BsonDocument headId = GetStreamHeadId(bucketId, streamId);
        IMongoQuery byId = Query.EQ(MongoStreamHeadFields.Id, headId);
        UpdateBuilder changes = Update
            .Set(MongoStreamHeadFields.HeadRevision, streamRevision)
            // Inc by zero guarantees the SnapshotRevision field is present when the
            // upsert inserts a brand-new stream-head document.
            .Inc(MongoStreamHeadFields.SnapshotRevision, 0)
            .Inc(MongoStreamHeadFields.Unsnapshotted, eventsCount);
        PersistedStreamHeads.Update(byId, changes, UpdateFlags.Upsert);
    });
    ThreadPool.QueueUserWorkItem(work, null);
}
/// <summary>
/// Asynchronously upserts the stream-head document keyed by <paramref name="streamId"/>,
/// recording the new head revision and incrementing the unsnapshotted event count on a
/// thread-pool thread (fire-and-forget).
/// </summary>
/// <param name="streamId">Stream identifier used as the document _id.</param>
/// <param name="streamRevision">New head revision to record.</param>
/// <param name="eventsCount">Number of events to add to the unsnapshotted count.</param>
private void UpdateStreamHeadAsync(Guid streamId, int streamRevision, int eventsCount)
{
    ThreadPool.QueueUserWorkItem(
        state => TryMongo(() =>
        {
            // Inc by zero ensures the SnapshotRevision field exists when the
            // upsert inserts a brand-new stream-head document.
            UpdateBuilder changes = Update.Set("HeadRevision", streamRevision)
                .Inc("SnapshotRevision", 0)
                .Inc("Unsnapshotted", eventsCount);
            PersistedStreamHeads.Update(Query.EQ("_id", streamId), changes, UpdateFlags.Upsert);
        }),
        null);
}
/// <summary>
/// Queues a fire-and-forget thread-pool work item that upserts the stream-head document
/// for the given stream (new head revision plus unsnapshotted counter increment).
/// A duplicate-key race on the upsert is logged and ignored; other failures propagate
/// through TryMongo.
/// </summary>
/// <param name="bucketId">Bucket owning the stream.</param>
/// <param name="streamId">Stream whose head document is updated.</param>
/// <param name="streamRevision">New head revision to record.</param>
/// <param name="eventsCount">Number of events to add to the unsnapshotted count.</param>
private void UpdateStreamHeadAsync(string bucketId, string streamId, int streamRevision, int eventsCount)
{
    ThreadPool.QueueUserWorkItem(state => TryMongo(() =>
    {
        try
        {
            BsonDocument headId = GetStreamHeadId(bucketId, streamId);
            IMongoQuery byId = Query.EQ(MongoStreamHeadFields.Id, headId);
            UpdateBuilder changes = Update
                .Set(MongoStreamHeadFields.HeadRevision, streamRevision)
                // Inc by zero guarantees the SnapshotRevision field is present when the
                // upsert inserts a brand-new stream-head document.
                .Inc(MongoStreamHeadFields.SnapshotRevision, 0)
                .Inc(MongoStreamHeadFields.Unsnapshotted, eventsCount);
            PersistedStreamHeads.Update(byId, changes, UpdateFlags.Upsert);
        }
        catch (MongoDuplicateKeyException ex)
        {
            // Two concurrent upserts can race to insert the same head; losing is harmless.
            Logger.Warn("Duplicate key exception {0} when upserting the stream head {1} {2}.", ex, bucketId, streamId);
        }
    }), null);
}