// Decides whether a commit record survives scavenging: a commit is kept
// unless scavenge analysis explicitly marked its transaction as removable.
private bool ShouldKeepCommit(CommitLogRecord commit, Dictionary<long, CommitInfo> commits) {
	CommitInfo info;
	if (!commits.TryGetValue(commit.TransactionPosition, out info))
		return true; // no info collected for this transaction - keep defensively
	// `!= false` so an undecided KeepCommit also keeps the record
	// (NOTE(review): presumably KeepCommit is a nullable bool - confirm).
	return info.KeepCommit != false;
}
// Test setup: writes one Data prepare, registers it as pending with the
// index committer, then writes a commit for the same correlation id.
// NOTE(review): the commit's transactionPosition is 0 while the prepare
// declares 0xDEAD - presumably deliberate for this scenario; confirm
// against the assertions that consume these records.
public override void When() {
	_eventId = Guid.NewGuid();
	_transactionId = Guid.NewGuid();
	// Prepare with fixed, recognizable positions/offsets.
	var record = new PrepareLogRecord(
		logPosition: 0,
		eventId: _eventId,
		correlationId: _transactionId,
		transactionPosition: 0xDEAD,
		transactionOffset: 0xBEEF,
		eventStreamId: "WorldEnding",
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.Data,
		eventType: "type",
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[] { 7, 17 });
	Assert.True(Writer.Write(record, out _logPosition));
	Writer.Flush();
	IndexCommitter.AddPendingPrepare(new[] { record }, _logPosition);
	// Commit written immediately after the prepare, sharing its correlation id.
	var record2 = new CommitLogRecord(
		logPosition: _logPosition,
		correlationId: _transactionId,
		transactionPosition: 0,
		timeStamp: new DateTime(2012, 12, 21),
		firstEventNumber: 10);
	Assert.True(Writer.Write(record2, out _logPosition));
	Writer.Flush();
}
// Writes a commit record, retrying exactly once if the first write fails
// (the writer reports a new position via `newPos` on failure). The retried
// record is re-created at that new position; for a commit that points at
// itself (TransactionPosition == LogPosition, i.e. an implicit transaction)
// the transaction position is moved along with the record. Returns whichever
// record was actually written.
private CommitLogRecord WriteCommitWithRetry(CommitLogRecord commit) {
	long newPos;
	if (!Writer.Write(commit, out newPos)) {
		// Preserve the original transaction position unless the commit is its own transaction.
		var transactionPos = commit.TransactionPosition == commit.LogPosition ? newPos : commit.TransactionPosition;
		var record = new CommitLogRecord(newPos, commit.CorrelationId, transactionPos, commit.TimeStamp, commit.FirstEventNumber);
		long writtenPos = newPos;
		if (!Writer.Write(record, out newPos)) {
			// A second consecutive failure is unexpected - fail loudly with both positions.
			throw new Exception(
				string.Format("Second write try failed when first writing commit at {0}, then at {1}.",
					commit.LogPosition, writtenPos));
		}
		return(record);
	}
	return(commit);
}
/// <summary>
/// Creates a pending transaction seeded with its known prepares and,
/// optionally, the commit that completes it.
/// </summary>
public PendingTransaction(long transactionPosition, long postPosition,
	IEnumerable<PrepareLogRecord> prepares, CommitLogRecord commit = null) {
	TransactionPosition = transactionPosition;
	PostPosition = postPosition;
	_commit = commit;
	Prepares.AddRange(prepares);
}
// Duplicate-event guard run before adding `indexEntries` for `commit`:
// for each version in the candidate range, reads the prepare already indexed
// at that position and fails (or breaks into an attached debugger) when it
// belongs to the same stream - a hash match alone is not a duplicate.
private void CheckDuplicateEvents(uint streamHash, CommitLogRecord commit, IList <IndexEntry> indexEntries, IList <PrepareLogRecord> prepares) {
	using (var reader = _backend.BorrowReader()) {
		// Entries already in the index covering the version range we are about to add.
		var entries = _tableIndex.GetRange(streamHash, indexEntries[0].Version, indexEntries[indexEntries.Count - 1].Version);
		foreach (var indexEntry in entries) {
			// The prepare we intend to index at this version.
			var prepare = prepares[indexEntry.Version - indexEntries[0].Version];
			PrepareLogRecord indexedPrepare = GetPrepare(reader, indexEntry.Position);
			if (indexedPrepare != null && indexedPrepare.EventStreamId == prepare.EventStreamId) {
				if (Debugger.IsAttached) {
					Debugger.Break();
				} else {
					throw new Exception(
						string.Format("Trying to add duplicate event #{0} to stream {1} (hash {2})\nCommit: {3}\n" +
							"Prepare: {4}\nIndexed prepare: {5}.",
							indexEntry.Version, prepare.EventStreamId, streamHash, commit, prepare, indexedPrepare));
				}
			}
		}
	}
}
// Writes a V0-format commit record via the test Writer and returns its log
// position. `pos` receives the writer position after the write.
// NOTE(review): `eventStreamId` is unused here - presumably kept for
// signature symmetry with sibling helpers; confirm before removing.
private long WriteCommitV0(Guid correlationId, long logPosition, long transactionPosition, string eventStreamId, long eventNumber, out long pos) {
	var record = new CommitLogRecord(logPosition, correlationId, transactionPosition,
		DateTime.UtcNow, eventNumber, LogRecordVersion.LogRecordV0);
	Writer.Write(record, out pos);
	return record.LogPosition;
}
// Builds one completed 16kb chunk holding three (prepare, commit) pairs for
// stream "es-to-scavenge", advances writer/chaser checkpoints to the chunk
// end, then scavenges with alwaysKeepScavenged so a scavenged chunk file is
// produced even if nothing shrinks.
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	_db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		16 * 1024, 0,
		new InMemoryCheckpoint(), new InMemoryCheckpoint(),
		new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1)));
	_db.Open();
	var chunk = _db.Manager.GetChunkFor(0);
	// Three events; each commit carries event numbers 0..2 and chains from the
	// position returned by the previous append.
	_p1 = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
		new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
	_res1 = chunk.TryAppend(_p1);
	_c1 = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0);
	_cres1 = chunk.TryAppend(_c1);
	_p2 = LogRecord.SingleWrite(_cres1.NewPosition, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
		new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
	_res2 = chunk.TryAppend(_p2);
	_c2 = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1);
	_cres2 = chunk.TryAppend(_c2);
	_p3 = LogRecord.SingleWrite(_cres2.NewPosition, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
		new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
	_res3 = chunk.TryAppend(_p3);
	_c3 = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2);
	_cres3 = chunk.TryAppend(_c3);
	chunk.Complete();
	// Both checkpoints must reach the chunk end before scavenging can run.
	_db.Config.WriterCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
	_db.Config.WriterCheckpoint.Flush();
	_db.Config.ChaserCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
	_db.Config.ChaserCheckpoint.Flush();
	var bus = new InMemoryBus("Bus");
	var ioDispatcher = new IODispatcher(bus, new PublishEnvelope(bus));
	// NOTE(review): the predicate presumably tells FakeReadIndex which streams
	// count as deleted/scavengeable - confirm FakeReadIndex semantics.
	var scavenger = new TFChunkScavenger(_db, ioDispatcher, new FakeTableIndex(),
		new FakeReadIndex(x => x == "es-to-scavenge"), Guid.NewGuid(), "fakeNodeIp");
	scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false);
	_scavengedChunk = _db.Manager.GetChunk(0);
}
// Last event number of a pending transaction's commit: the commit's first
// event number advanced by the number of prepares registered for it.
// Throws InvalidOperationException if the transaction was never registered.
public long GetCommitLastEventNumber(CommitLogRecord record) {
	if (!Transactions.TryGetValue(record.CorrelationId, out var transaction))
		throw new InvalidOperationException("Cannot get last event number for an unknown transaction");
	return record.FirstEventNumber + transaction.Prepares.Count;
}
// A commit is kept unless scavenge info explicitly marked it removable.
// An unknown transaction position is kept defensively - it should never
// happen, since `commits` is populated from the commit records themselves.
private bool ShouldKeepCommit(CommitLogRecord commit, Dictionary <long, CommitInfo> commits) {
	CommitInfo commitInfo;
	return !commits.TryGetValue(commit.TransactionPosition, out commitInfo)
		|| commitInfo.KeepCommit != false;
}
// Log-format-generic variant of the scavenge fixture: resolves a stream id
// through the log format's stream-name index, writes three 2kb events with
// their commits into one 16kb chunk, completes it, advances both checkpoints
// to the chunk end, and scavenges with alwaysKeepScavenged.
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();
	_db = new TFChunkDb(TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 16 * 1024));
	_db.Open();
	var chunk = _db.Manager.GetChunkFor(0);
	var logFormat = LogFormatHelper <TLogFormat, TStreamId> .LogFormat;
	var streamName = "es-to-scavenge";
	// Map the stream name to the log format's stream id representation.
	logFormat.StreamNameIndex.GetOrAddId(streamName, out var streamId, out _, out _);
	var expectedVersion = ExpectedVersion.NoStream;
	// Three 2kb events, each prepare chained from the previous append position.
	_p1 = LogRecord.SingleWrite(logFormat.RecordFactory, 0, Guid.NewGuid(), Guid.NewGuid(), streamId,
		expectedVersion++, "et1", new byte[2048], new byte[] { 5, 7 });
	_res1 = chunk.TryAppend(_p1);
	_c1 = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0);
	_cres1 = chunk.TryAppend(_c1);
	_p2 = LogRecord.SingleWrite(logFormat.RecordFactory, _cres1.NewPosition, Guid.NewGuid(), Guid.NewGuid(), streamId,
		expectedVersion++, "et1", new byte[2048], new byte[] { 5, 7 });
	_res2 = chunk.TryAppend(_p2);
	_c2 = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1);
	_cres2 = chunk.TryAppend(_c2);
	_p3 = LogRecord.SingleWrite(logFormat.RecordFactory, _cres2.NewPosition, Guid.NewGuid(), Guid.NewGuid(), streamId,
		expectedVersion++, "et1", new byte[2048], new byte[] { 5, 7 });
	_res3 = chunk.TryAppend(_p3);
	_c3 = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2);
	_cres3 = chunk.TryAppend(_c3);
	chunk.Complete();
	// Remember the unscavenged size so tests can compare against the scavenged chunk.
	_originalFileSize = chunk.FileSize;
	// Both checkpoints must reach the chunk end before scavenging can run.
	_db.Config.WriterCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
	_db.Config.WriterCheckpoint.Flush();
	_db.Config.ChaserCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
	_db.Config.ChaserCheckpoint.Flush();
	var scavenger = new TFChunkScavenger <TStreamId>(_db, new FakeTFScavengerLog(), new FakeTableIndex <TStreamId>(),
		new FakeReadIndex <TLogFormat, TStreamId>(x => EqualityComparer <TStreamId> .Default.Equals(x, streamId)),
		logFormat.SystemStreams);
	await scavenger.Scavenge(alwaysKeepScavenged : true, mergeChunks : false);
	_scavengedChunk = _db.Manager.GetChunk(0);
}
// Handles a commit record: flushes any pending explicit transaction, commits
// this record through the index committer, and publishes a CommitAck on the
// master bus. If nothing was indexed (Invalid), the last event number is
// reported as FirstEventNumber - 1 so the ack range stays consistent.
private void ProcessCommitRecord(CommitLogRecord record, bool isTfEof) {
	CommitPendingTransaction(_transaction, isTfEof);
	var firstEventNumber = record.FirstEventNumber;
	var lastEventNumber = _indexCommitter.Commit(record, isTfEof, true);
	if (lastEventNumber == EventNumber.Invalid) {
		lastEventNumber = record.FirstEventNumber - 1;
	}
	_masterBus.Publish(new StorageMessage.CommitAck(record.CorrelationId, record.LogPosition,
		record.TransactionPosition, firstEventNumber, lastEventNumber, true));
}
// Commits a transaction into the index.
// Returns EventNumber.Invalid when the commit is already reflected in the
// index (its log position is at or before the last committed position,
// except during init); otherwise gathers the transaction's prepares and
// delegates to the prepare-list overload.
private int Commit(CommitLogRecord commit, bool isTfEof, bool doingInit) {
	int eventNumber = EventNumber.Invalid;
	var lastCommitPosition = Interlocked.Read(ref _lastCommitPosition);
	if (commit.LogPosition < lastCommitPosition || (commit.LogPosition == lastCommitPosition && !doingInit)) {
		return(eventNumber); // already committed
	}

	// Renamed from an unprofessional identifier: the transaction's prepares.
	var transactionPrepares = GetTransactionPrepares(commit.TransactionPosition, commit.LogPosition).ToList();
	return(Commit(transactionPrepares, isTfEof, doingInit, commit.FirstEventNumber, true));
}
// Handles a commit record on the commit-tracking path: flushes any pending
// explicit transaction, computes the commit's last event number from the
// registered prepares, queues the commit as pending with the index committer
// service, and publishes a CommitAck. An Invalid last event number (nothing
// to index) is reported as FirstEventNumber - 1.
private void ProcessCommitRecord(CommitLogRecord record, long postPosition) {
	CommitPendingTransaction(_transaction, postPosition);
	var firstEventNumber = record.FirstEventNumber;
	var lastEventNumber = _indexCommitterService.GetCommitLastEventNumber(record);
	_indexCommitterService.AddPendingCommit(record, postPosition);
	if (lastEventNumber == EventNumber.Invalid) {
		lastEventNumber = record.FirstEventNumber - 1;
	}
	_masterBus.Publish(new StorageMessage.CommitAck(record.CorrelationId, record.LogPosition,
		record.TransactionPosition, firstEventNumber, lastEventNumber, true));
}
// Writes a V0-format hard delete for `eventStreamId`: a self-contained
// StreamDelete prepare (TransactionBegin | TransactionEnd on one record)
// followed by its commit at event number int.MaxValue.
// NOTE(review): int.MaxValue-1 / int.MaxValue presumably mirror the V0
// deleted-stream sentinel values - confirm against the V0 record format.
private void WriteV0HardDelete(string eventStreamId) {
	long pos;
	var logPosition = WriterCheckpoint.ReadNonFlushed();
	var prepare = new PrepareLogRecord(logPosition, Guid.NewGuid(), Guid.NewGuid(), logPosition, 0, eventStreamId,
		int.MaxValue - 1, DateTime.UtcNow,
		PrepareFlags.StreamDelete | PrepareFlags.TransactionBegin | PrepareFlags.TransactionEnd,
		SystemEventTypes.StreamDeleted, new byte[0], new byte[0],
		prepareRecordVersion: LogRecordVersion.LogRecordV0);
	Writer.Write(prepare, out pos);
	// Commit the delete; position re-read because the prepare advanced the writer.
	var commit = new CommitLogRecord(WriterCheckpoint.ReadNonFlushed(), prepare.CorrelationId, prepare.LogPosition,
		DateTime.UtcNow, int.MaxValue, commitRecordVersion: LogRecordVersion.LogRecordV0);
	Writer.Write(commit, out pos);
}
// Builds one completed 16kb chunk with three 2kb events (prepare + commit
// each) for stream "es-to-scavenge", records the original file size,
// advances writer/chaser checkpoints to the chunk end, then scavenges
// synchronously with alwaysKeepScavenged.
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	_db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0, chunkSize: 16 * 1024));
	_db.Open();
	var chunk = _db.Manager.GetChunkFor(0);
	_p1 = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
		new byte[2048], new byte[] { 5, 7 });
	_res1 = chunk.TryAppend(_p1);
	_c1 = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0);
	_cres1 = chunk.TryAppend(_c1);
	_p2 = LogRecord.SingleWrite(_cres1.NewPosition, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
		new byte[2048], new byte[] { 5, 7 });
	_res2 = chunk.TryAppend(_p2);
	_c2 = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1);
	_cres2 = chunk.TryAppend(_c2);
	_p3 = LogRecord.SingleWrite(_cres2.NewPosition, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
		new byte[2048], new byte[] { 5, 7 });
	_res3 = chunk.TryAppend(_p3);
	_c3 = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2);
	_cres3 = chunk.TryAppend(_c3);
	chunk.Complete();
	// Kept so tests can compare pre/post-scavenge chunk sizes.
	_originalFileSize = chunk.FileSize;
	// Both checkpoints must reach the chunk end before scavenging can run.
	_db.Config.WriterCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
	_db.Config.WriterCheckpoint.Flush();
	_db.Config.ChaserCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
	_db.Config.ChaserCheckpoint.Flush();
	var scavenger = new TFChunkScavenger(_db, new FakeTFScavengerLog(), new FakeTableIndex(),
		new FakeReadIndex(x => x == "es-to-scavenge"));
	// Synchronous wait is acceptable in this fixture setup.
	scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false).Wait();
	_scavengedChunk = _db.Manager.GetChunk(0);
}
// Scenario: stream "ES" gets five ~3kb events spanning chunks 1-2, is then
// hard-deleted (separate prepare + commit), and a second stream "ES2" spills
// into chunk 3; finally a scavenge runs without completing the last chunk.
protected override void WriteTestScenario() {
	WriteSingleEvent("ES", 0, new string('.', 3000)); // chunk 1
	WriteSingleEvent("ES", 1, new string('.', 3000));
	WriteSingleEvent("ES", 2, new string('.', 3000));
	WriteSingleEvent("ES", 3, new string('.', 3000), retryOnFail: true); // chunk 2
	WriteSingleEvent("ES", 4, new string('.', 3000));
	// Hard delete of "ES": tombstone prepare then its commit.
	_event7prepare = WriteDeletePrepare("ES");
	_event7commit = WriteDeleteCommit(_event7prepare);
	_event7 = new EventRecord(EventNumber.DeletedStream, _event7prepare);
	_event9 = WriteSingleEvent("ES2", 0, new string('.', 5000), retryOnFail: true); //chunk 3
	Scavenge(completeLast: false, mergeChunks: false);
}
// Per-test setup: opens an in-memory-checkpointed chunk DB with 1kb chunks,
// opens a writer, and flushes a single commit record at position 0 that
// references transaction position 4321.
public void SetUp() {
	_writerCheckpoint = new InMemoryCheckpoint();
	_db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _writerCheckpoint, new InMemoryCheckpoint(), 1024));
	_db.Open();

	_writer = new TFChunkWriter(_db);
	_writer.Open();

	_record = new CommitLogRecord(
		logPosition: 0,
		correlationId: _eventId,
		transactionPosition: 4321,
		timeStamp: new DateTime(2012, 12, 21),
		firstEventNumber: 10);
	_writer.Write(_record, out _);
	_writer.Flush();
}
// Attaches a commit to its registered pending transaction, records it, and
// advances the post position. Throws InvalidOperationException for a null
// commit or one whose transaction was never registered; the post position is
// only updated when the commit is accepted.
public void AddPendingCommit(CommitLogRecord commit, long postPosition) {
	if (commit == null)
		throw new InvalidOperationException("Cannot commit a null transaction");
	if (!Transactions.TryGetValue(commit.CorrelationId, out var transaction))
		throw new InvalidOperationException("Cannot commit an unknown transaction");

	transaction.CommitTransaction(commit);
	Records.Add(commit);
	PostPosition = postPosition;
}
// Older-API variant of the scavenge fixture: builds one completed 16kb chunk
// with three small events for "es-to-scavenge" and scavenges with
// alwaysKeepScavenged. Note this API takes no chaser/epoch checkpoints and
// uses OpenVerifyAndClean instead of Open.
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	_db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		16 * 1024, 0,
		new InMemoryCheckpoint(), new InMemoryCheckpoint(),
		new ICheckpoint[0]));
	_db.OpenVerifyAndClean();
	var chunk = _db.Manager.GetChunk(0);
	_p1 = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
		new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
	_res1 = chunk.TryAppend(_p1);
	_c1 = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0);
	_cres1 = chunk.TryAppend(_c1);
	_p2 = LogRecord.SingleWrite(_cres1.NewPosition, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
		new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
	_res2 = chunk.TryAppend(_p2);
	_c2 = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1);
	_cres2 = chunk.TryAppend(_c2);
	_p3 = LogRecord.SingleWrite(_cres2.NewPosition, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
		new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
	_res3 = chunk.TryAppend(_p3);
	_c3 = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2);
	_cres3 = chunk.TryAppend(_c3);
	chunk.Complete();
	var scavenger = new TFChunkScavenger(_db, new FakeReadIndex(x => x == "es-to-scavenge"));
	scavenger.Scavenge(alwaysKeepScavenged: true);
	_scavengedChunk = _db.Manager.GetChunk(0);
}
// Pre-commit pass over a commit's transaction: registers each contributing
// prepare (delete markers and data prepares only) in the committed-events
// cache, tracks the resulting last event number per stream, and - for
// metastreams - caches the raw metadata of the last prepare. Enforces that
// every prepare in the transaction targets the same stream.
public void PreCommit(CommitLogRecord commit) {
	string streamId = null;
	long eventNumber = EventNumber.Invalid;
	PrepareLogRecord lastPrepare = null;
	foreach (var prepare in GetTransactionPrepares(commit.TransactionPosition, commit.LogPosition)) {
		// Empty prepares (e.g. bare TransactionBegin/End) contribute nothing.
		if (prepare.Flags.HasNoneOf(PrepareFlags.StreamDelete | PrepareFlags.Data)) {
			continue;
		}
		if (streamId == null) {
			streamId = prepare.EventStreamId;
		}
		if (prepare.EventStreamId != streamId) {
			throw new Exception(string.Format("Expected stream: {0}, actual: {1}.", streamId, prepare.EventStreamId));
		}
		// Delete markers map to the deleted-stream sentinel, data prepares to
		// the commit's first event number plus the prepare's offset.
		eventNumber = prepare.Flags.HasAnyOf(PrepareFlags.StreamDelete)
			? EventNumber.DeletedStream
			: commit.FirstEventNumber + prepare.TransactionOffset;
		lastPrepare = prepare;
		_committedEvents.PutRecord(prepare.EventId, new EventInfo(streamId, eventNumber), throwOnDuplicate: false);
	}
	if (eventNumber != EventNumber.Invalid) {
		_streamVersions.Put(streamId, eventNumber, +1);
	}
	if (lastPrepare != null && SystemStreams.IsMetastream(streamId)) {
		// Metastream writes carry the stream's raw metadata in the prepare body.
		var rawMeta = lastPrepare.Data;
		_streamRawMetas.Put(SystemStreams.OriginalStreamOf(streamId), new StreamMeta(rawMeta, null), +1);
	}
}
// Scenario (stream-created-record era): "ES" is created, receives five ~3kb
// events across chunks 1-2, and is hard-deleted; "ES2" is created and spills
// into chunk 3; then a scavenge runs without completing the last chunk.
protected override void WriteTestScenario() {
	_event1 = WriteStreamCreated("ES"); // chunk 1
	_event2 = WriteSingleEvent("ES", 1, new string('.', 3000));
	_event3 = WriteSingleEvent("ES", 2, new string('.', 3000));
	_event4 = WriteSingleEvent("ES", 3, new string('.', 3000));
	_event5 = WriteSingleEvent("ES", 4, new string('.', 3000), retryOnFail: true); // chunk 2
	_event6 = WriteSingleEvent("ES", 5, new string('.', 3000));
	// Hard delete of "ES": tombstone prepare then its commit.
	_event7prepare = WriteDeletePrepare("ES");
	_event7commit = WriteDeleteCommit(_event7prepare);
	_event7 = new EventRecord(EventNumber.DeletedStream, _event7prepare);
	_event8 = WriteStreamCreated("ES2");
	_event9 = WriteSingleEvent("ES2", 1, new string('.', 5000), retryOnFail: true); //chunk 3
	Scavenge(completeLast: false);
}
// must use the commit to see if these are the first events in the stream
// and for checkpointing.
public void Confirm(
	IList <IPrepareLogRecord <string> > prepares,
	CommitLogRecord commit,
	bool catchingUp,
	IIndexBackend <string> backend) {
	// While catching up there is nothing to do - the stream existence filter
	// is initialized after the main index is caught up.
	if (catchingUp)
		return;

	// FirstEventNumber == 0 means this commit starts the stream.
	var startsStream = prepares.Count != 0 && commit.FirstEventNumber == 0;
	if (startsStream) {
		var finalPrepare = prepares[prepares.Count - 1];
		_existenceFilter.Add(finalPrepare.EventStreamId);
	}

	_existenceFilter.CurrentCheckpoint = commit.LogPosition;
}
// Registers a commit against its pending transaction, keyed by transaction
// position. If prepares were already registered, the existing entry is
// replaced (optimistic TryUpdate - same prepares plus the commit); otherwise
// a commit-only transaction is added. A lost race on either path is treated
// as a programming error and throws rather than retrying.
public void AddPendingCommit(CommitLogRecord commit, long postPosition) {
	PendingTransaction transaction;
	if (_pendingTransactions.TryGetValue(commit.TransactionPosition, out transaction)) {
		var newTransaction = new PendingTransaction(commit.TransactionPosition, postPosition, transaction.Prepares, commit);
		if (!_pendingTransactions.TryUpdate(commit.TransactionPosition, newTransaction, transaction)) {
			throw new InvalidOperationException("Failed to update pending commit");
		}
	} else {
		var pendingTransaction = new PendingTransaction(commit.TransactionPosition, postPosition, commit);
		if (!_pendingTransactions.TryAdd(commit.TransactionPosition, pendingTransaction)) {
			throw new InvalidOperationException("Failed to add pending commit");
		}
	}
	PostPosition = postPosition;
}
// Per-test setup (older DB API): 1kb-chunk DB opened with verify-and-clean,
// then a single commit record flushed at the non-zero position 0xFEED,
// referencing transaction position 4321.
public void SetUp() {
	_writerCheckpoint = new InMemoryCheckpoint();
	_db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		1024, 0, _writerCheckpoint, new InMemoryCheckpoint(), new ICheckpoint[0]));
	_db.OpenVerifyAndClean();

	_writer = new TFChunkWriter(_db);
	_writer.Open();

	_record = new CommitLogRecord(
		logPosition: 0xFEED,
		correlationId: _eventId,
		transactionPosition: 4321,
		timeStamp: new DateTime(2012, 12, 21),
		firstEventNumber: 10);
	long pos;
	_writer.Write(_record, out pos);
	_writer.Flush();
}
// Log-format-generic variant of the prepare/commit setup: resolves the
// record factory, stream id and event type id from the log format helper,
// writes one Data prepare, registers it as pending, then writes the commit.
// NOTE(review): commit transactionPosition 0 vs prepare transactionPos
// 0xDEAD - presumably deliberate for this scenario; confirm with the asserts.
public override void When() {
	_eventId = Guid.NewGuid();
	_transactionId = Guid.NewGuid();
	var recordFactory = LogFormatHelper <TLogFormat, TStreamId> .RecordFactory;
	var streamId = LogFormatHelper <TLogFormat, TStreamId> .StreamId;
	var eventTypeId = LogFormatHelper <TLogFormat, TStreamId> .EventTypeId;
	var record = LogRecord.Prepare(
		factory: recordFactory,
		logPosition: 0,
		eventId: _eventId,
		correlationId: _transactionId,
		transactionPos: 0xDEAD,
		transactionOffset: 0xBEEF,
		eventStreamId: streamId,
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.Data,
		eventType: eventTypeId,
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[] { 7, 17 });
	Assert.True(Writer.Write(record, out _logPosition));
	Writer.Flush();
	IndexCommitter.AddPendingPrepare(new[] { record }, _logPosition);
	var record2 = new CommitLogRecord(
		logPosition: _logPosition,
		correlationId: _transactionId,
		transactionPosition: 0,
		timeStamp: new DateTime(2012, 12, 21),
		firstEventNumber: 10);
	Assert.True(Writer.Write(record2, out _logPosition));
	Writer.Flush();
}
// Computes the last event number a commit will produce by walking its
// transaction's prepares. Returns EventNumber.Invalid if the commit is
// already covered by _lastCommitPosition (unless an index rebuild is in
// progress). Delete markers yield EventNumber.DeletedStream; data prepares
// yield FirstEventNumber + TransactionOffset. The loop keeps the value from
// the LAST contributing prepare.
public long GetCommitLastEventNumber(CommitLogRecord commit) {
	long eventNumber = EventNumber.Invalid;
	var lastCommitPosition = Interlocked.Read(ref _lastCommitPosition);
	if (commit.LogPosition < lastCommitPosition || (commit.LogPosition == lastCommitPosition && !_indexRebuild)) {
		return(eventNumber);
	}
	foreach (var prepare in GetTransactionPrepares(commit.TransactionPosition, commit.LogPosition)) {
		// Empty prepares (bare TransactionBegin/End) contribute nothing.
		if (prepare.Flags.HasNoneOf(PrepareFlags.StreamDelete | PrepareFlags.Data)) {
			continue;
		}
		eventNumber = prepare.Flags.HasAllOf(PrepareFlags.StreamDelete)
			? EventNumber.DeletedStream
			: commit.FirstEventNumber + prepare.TransactionOffset;
	}
	return(eventNumber);
}
// Per-test setup (older DB API): 1kb-chunk DB, single commit record flushed
// at position 0xFEED referencing transaction 4321.
// NOTE(review): this CommitLogRecord overload takes `eventNumber:` where
// sibling fixtures use `firstEventNumber:` - presumably an earlier ctor
// signature; confirm against the record type in this branch.
public void SetUp() {
	_writerCheckpoint = new InMemoryCheckpoint();
	_db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		1024, 0, _writerCheckpoint, new InMemoryCheckpoint(), new ICheckpoint[0]));
	_db.OpenVerifyAndClean();
	_writer = new TFChunkWriter(_db);
	_writer.Open();
	_record = new CommitLogRecord(logPosition: 0xFEED,
		correlationId: _eventId,
		transactionPosition: 4321,
		timeStamp: new DateTime(2012, 12, 21),
		eventNumber: 10);
	long newPos;
	_writer.Write(_record, out newPos);
	_writer.Flush();
}
// Commit invariant check: a newly committed event number must directly
// follow the stream's current last event number. Deleted-stream tombstones
// are exempt. Breaks into an attached debugger, otherwise throws.
private void CheckStreamVersion(string streamId, int newEventNumber, CommitLogRecord commit) {
	if (newEventNumber == EventNumber.DeletedStream)
		return;

	int lastEventNumber = _indexReader.GetStreamLastEventNumber(streamId);
	if (newEventNumber == lastEventNumber + 1)
		return; // invariant holds

	if (Debugger.IsAttached)
		Debugger.Break();
	else
		throw new Exception(
			string.Format("Commit invariant violation: new event number {0} doesn't correspond to current stream version {1}.\n" +
				"Stream ID: {2}.\nCommit: {3}.", newEventNumber, lastEventNumber, streamId, commit));
}
/// <summary>Commits outside of init: delegates with doingInit = false.</summary>
public int Commit(CommitLogRecord commit, bool isTfEof) => Commit(commit, isTfEof, false);
// Commits a transaction into the index.
// Returns EventNumber.Invalid when the commit is already reflected in the
// index (its log position is at or before the last committed position,
// except during init); otherwise gathers the transaction's prepares and
// delegates to the prepare-list overload.
private int Commit(CommitLogRecord commit, bool isTfEof, bool doingInit) {
	int eventNumber = EventNumber.Invalid;
	var lastCommitPosition = Interlocked.Read(ref _lastCommitPosition);
	if (commit.LogPosition < lastCommitPosition || (commit.LogPosition == lastCommitPosition && !doingInit))
		return eventNumber; // already committed
	// Renamed from an unprofessional identifier: the transaction's prepares.
	var transactionPrepares = GetTransactionPrepares(commit.TransactionPosition, commit.LogPosition).ToList();
	return Commit(transactionPrepares, isTfEof, doingInit, commit.FirstEventNumber, true);
}
/// <summary>Delegates to the underlying index committer.</summary>
public long GetCommitLastEventNumber(CommitLogRecord commit) =>
	_indexCommitter.GetCommitLastEventNumber(commit);
// Commits every prepare of a transaction into the committed-events cache and
// table index, publishing EventCommited messages for newly indexed records.
// Stream identity is taken from the first prepare; debug builds assert all
// prepares share it. Records at or before the persisted checkpoints are
// assumed to already live in PTables and are not re-indexed.
public void Commit(CommitLogRecord commit) {
	bool first = true;
	int eventNumber = -1;
	uint streamHash = 0;
	string eventStreamId = null;
	foreach (var prepare in GetTransactionPrepares(commit.TransactionPosition)) {
		if (first) {
			streamHash = _hasher.Hash(prepare.EventStreamId);
			eventStreamId = prepare.EventStreamId;
			first = false;
		} else {
			Debug.Assert(prepare.EventStreamId == eventStreamId);
		}
		bool addToIndex = false;
		if ((prepare.Flags & PrepareFlags.StreamDelete) != 0) {
			// Delete marker: record the deleted-stream sentinel.
			eventNumber = EventNumber.DeletedStream;
			_committedEvents.PutRecord(prepare.EventId, Tuple.Create(eventStreamId, eventNumber), throwOnDuplicate: false);
			addToIndex = commit.LogPosition > _persistedCommitCheckpoint ||
				commit.LogPosition == _persistedCommitCheckpoint && prepare.LogPosition > _persistedPrepareCheckpoint;
		} else if ((prepare.Flags & PrepareFlags.Data) != 0) {
			// Data prepare: event number derives from the commit plus the prepare's offset.
			eventNumber = commit.EventNumber + prepare.TransactionOffset;
			_committedEvents.PutRecord(prepare.EventId, Tuple.Create(eventStreamId, eventNumber), throwOnDuplicate: false);
			addToIndex = commit.LogPosition > _persistedCommitCheckpoint ||
				commit.LogPosition == _persistedCommitCheckpoint && prepare.LogPosition > _persistedPrepareCheckpoint;
		}
		// could be just empty prepares for TransactionBegin and TransactionEnd, for instance
		// or records which are rebuilt but are already in PTables
		if (addToIndex) {
#if DEBUG
			// Debug-only duplicate detection before touching the index.
			long pos;
			if (_tableIndex.TryGetOneValue(streamHash, eventNumber, out pos)) {
				EventRecord rec;
				if (ReadEvent(eventStreamId, eventNumber, out rec) == SingleReadResult.Success) {
					Debugger.Break();
					throw new Exception(
						string.Format(
							"Trying to add duplicate event #{0} for stream {1}(hash {2})\nCommit: {3}\nPrepare: {4}.",
							eventNumber, eventStreamId, streamHash, commit, prepare));
				}
			}
#endif
			_tableIndex.Add(commit.LogPosition, streamHash, eventNumber, prepare.LogPosition);
			_bus.Publish(new ReplicationMessage.EventCommited(commit.LogPosition, eventNumber, prepare));
		}
		// Advance the last commit position (monotonically) as we go.
		_lastCommitPosition = Math.Max(_lastCommitPosition, commit.LogPosition);
	}
}
// Commits a transaction into the read index: collects index entries for all
// contributing prepares (delete markers and data prepares), optionally runs
// extra invariant checks, bulk-adds the entries atomically, refreshes the
// stream-info cache (invalidating metadata when a metastream is written),
// and finally advances _lastCommitPosition with a CAS that treats concurrent
// modification as a fatal error. Already-committed positions are skipped
// unless rebuilding the index.
public void Commit(CommitLogRecord commit) {
	var lastCommitPosition = Interlocked.Read(ref _lastCommitPosition);
	if (commit.LogPosition < lastCommitPosition || (commit.LogPosition == lastCommitPosition && !_indexRebuild))
		return; // already committed
	uint streamHash = 0;
	string streamId = null;
	int eventNumber = int.MinValue;
	var indexEntries = new List<IndexEntry>();
	var prepares = new List<PrepareLogRecord>();
	foreach (var prepare in GetTransactionPrepares(commit.TransactionPosition, commit.LogPosition)) {
		// Only delete markers and data prepares contribute events.
		if ((prepare.Flags & (PrepareFlags.StreamDelete | PrepareFlags.Data)) == 0)
			continue;
		if (streamId == null) {
			streamId = prepare.EventStreamId;
			streamHash = _hasher.Hash(prepare.EventStreamId);
		} else
			Debug.Assert(prepare.EventStreamId == streamId);
		eventNumber = (prepare.Flags & PrepareFlags.StreamDelete) != 0
			? EventNumber.DeletedStream
			: commit.FirstEventNumber + prepare.TransactionOffset;
		_committedEvents.PutRecord(prepare.EventId, Tuple.Create(streamId, eventNumber), throwOnDuplicate: false);
		// Records at or before the persisted checkpoints are already in PTables.
		var addToIndex = commit.LogPosition > _persistedCommitCheckpoint ||
			commit.LogPosition == _persistedCommitCheckpoint && prepare.LogPosition > _persistedPrepareCheckpoint;
		if (addToIndex) {
			indexEntries.Add(new IndexEntry(streamHash, eventNumber, prepare.LogPosition));
			prepares.Add(prepare);
		}
	}
	if (indexEntries.Count > 0) {
		if (_additionalCommitChecks) {
			CheckStreamVersion(streamId, indexEntries[0].Version, commit);
			CheckDuplicateEvents(streamHash, commit, indexEntries, prepares);
		}
		_tableIndex.AddEntries(commit.LogPosition, indexEntries); // atomically add a whole bulk of entries
	}
	if (eventNumber != int.MinValue) {
		if (eventNumber < 0)
			throw new Exception(string.Format("EventNumber {0} is incorrect.", eventNumber));
		_streamInfoCache.Put(streamId,
			key => new StreamCacheInfo(eventNumber, null),
			(key, old) => new StreamCacheInfo(eventNumber, old.Metadata));
		if (SystemStreams.IsMetastream(streamId)) {
			// if we are committing to metastream, we need to invalidate metastream cache
			// TODO AN: race condition in setting/clearing metadata
			// in the meantime GetStreamMetadataCached could be trying to set stale metadata
			_streamInfoCache.Put(SystemStreams.OriginalStreamOf(streamId),
				key => new StreamCacheInfo(-1, null),
				(key, old) => new StreamCacheInfo(old.LastEventNumber, null));
		}
	}
	var newLastCommitPosition = commit.LogPosition > lastCommitPosition ? commit.LogPosition : lastCommitPosition;
	if (Interlocked.CompareExchange(ref _lastCommitPosition, newLastCommitPosition, lastCommitPosition) != lastCommitPosition)
		throw new Exception("Concurrency error in ReadIndex.Commit: _lastCommitPosition was modified during Commit execution!");
	for (int i = 0, n = indexEntries.Count; i < n; ++i) {
		_bus.Publish(new StorageMessage.EventCommited(commit.LogPosition, new EventRecord(indexEntries[i].Version, prepares[i])));
	}
}
/// <summary>Captures the first event number of the given commit record.</summary>
public CommitInfo(CommitLogRecord commitRecord) => EventNumber = commitRecord.FirstEventNumber;
// Duplicate-event guard: for each index entry already covering the candidate
// version range, reads the prepare it points at and fails (or breaks into an
// attached debugger) if it belongs to the same stream we are about to index.
// A hash match alone is not a duplicate - only a stream-id match is.
private void CheckDuplicateEvents(uint streamHash, CommitLogRecord commit, IList<IndexEntry> indexEntries,
	IList<PrepareLogRecord> prepares) {
	using (var reader = _backend.BorrowReader()) {
		var firstVersion = indexEntries[0].Version;
		var lastVersion = indexEntries[indexEntries.Count - 1].Version;
		foreach (var entry in _tableIndex.GetRange(streamHash, firstVersion, lastVersion)) {
			var newPrepare = prepares[entry.Version - firstVersion];
			var existing = GetPrepare(reader, entry.Position);
			if (existing == null || existing.EventStreamId != newPrepare.EventStreamId)
				continue;
			if (Debugger.IsAttached)
				Debugger.Break();
			else
				throw new Exception(
					string.Format("Trying to add duplicate event #{0} to stream {1} (hash {2})\nCommit: {3}\n" +
						"Prepare: {4}\nIndexed prepare: {5}.",
						entry.Version, newPrepare.EventStreamId, streamHash, commit, newPrepare, existing));
		}
	}
}
/// <summary>Not supported by this implementation.</summary>
public void Commit(CommitLogRecord record) => throw new NotImplementedException();
// Commits a transaction into the read index and returns the last event
// number produced (EventNumber.Invalid if already committed or nothing
// contributed). Collects index entries for prepares beyond the persisted
// (commit, prepare) position, optionally runs invariant checks, bulk-adds
// entries atomically, updates backend stream state (last event number,
// metadata invalidation, system settings), then CAS-advances
// _lastCommitPosition and publishes EventCommitted messages - the final one
// flagged with isTfEof.
public int Commit(CommitLogRecord commit, bool isTfEof) {
	int eventNumber = EventNumber.Invalid;
	var lastCommitPosition = Interlocked.Read(ref _lastCommitPosition);
	if (commit.LogPosition < lastCommitPosition || (commit.LogPosition == lastCommitPosition && !_indexRebuild))
		return eventNumber; // already committed
	string streamId = null;
	uint streamHash = 0;
	var indexEntries = new List<IndexEntry>();
	var prepares = new List<PrepareLogRecord>();
	foreach (var prepare in GetTransactionPrepares(commit.TransactionPosition, commit.LogPosition)) {
		// Empty prepares (bare TransactionBegin/End) contribute nothing.
		if (prepare.Flags.HasNoneOf(PrepareFlags.StreamDelete | PrepareFlags.Data))
			continue;
		if (streamId == null) {
			// Stream identity comes from the first contributing prepare.
			streamId = prepare.EventStreamId;
			streamHash = _hasher.Hash(prepare.EventStreamId);
		} else {
			if (prepare.EventStreamId != streamId)
				throw new Exception(string.Format("Expected stream: {0}, actual: {1}.", streamId, prepare.EventStreamId));
		}
		eventNumber = prepare.Flags.HasAllOf(PrepareFlags.StreamDelete)
			? EventNumber.DeletedStream
			: commit.FirstEventNumber + prepare.TransactionOffset;
		// Only index records strictly beyond the persisted (commit, prepare) position.
		if (new TFPos(commit.LogPosition, prepare.LogPosition) > new TFPos(_persistedCommitPos, _persistedPreparePos)) {
			indexEntries.Add(new IndexEntry(streamHash, eventNumber, prepare.LogPosition));
			prepares.Add(prepare);
		}
	}
	if (indexEntries.Count > 0) {
		if (_additionalCommitChecks) {
			CheckStreamVersion(streamId, indexEntries[0].Version, commit);
			CheckDuplicateEvents(streamHash, commit, indexEntries, prepares);
		}
		_tableIndex.AddEntries(commit.LogPosition, indexEntries); // atomically add a whole bulk of entries
	}
	if (eventNumber != EventNumber.Invalid) {
		if (eventNumber < 0)
			throw new Exception(string.Format("EventNumber {0} is incorrect.", eventNumber));
		_backend.SetStreamLastEventNumber(streamId, eventNumber);
		if (SystemStreams.IsMetastream(streamId))
			_backend.SetStreamMetadata(SystemStreams.OriginalStreamOf(streamId), null); // invalidate cached metadata
		if (streamId == SystemStreams.SettingsStream)
			// Settings stream writes update cached system settings from the last prepare's body.
			_backend.SetSystemSettings(DeserializeSystemSettings(prepares[prepares.Count - 1].Data));
	}
	var newLastCommitPosition = Math.Max(commit.LogPosition, lastCommitPosition);
	if (Interlocked.CompareExchange(ref _lastCommitPosition, newLastCommitPosition, lastCommitPosition) != lastCommitPosition)
		throw new Exception("Concurrency error in ReadIndex.Commit: _lastCommitPosition was modified during Commit execution!");
	for (int i = 0, n = indexEntries.Count; i < n; ++i) {
		_bus.Publish(
			new StorageMessage.EventCommitted(
				commit.LogPosition,
				new EventRecord(indexEntries[i].Version, prepares[i]),
				isTfEof && i == n - 1));
	}
	return eventNumber;
}
// Commit invariant check: a newly committed event number must be exactly one
// past the stream's current last event number. Deleted-stream tombstones are
// exempt. Breaks into an attached debugger, otherwise throws.
private void CheckStreamVersion(string streamId, int newEventNumber, CommitLogRecord commit) {
	if (newEventNumber == EventNumber.DeletedStream)
		return;

	int lastEventNumber = _indexReader.GetStreamLastEventNumber(streamId);
	if (newEventNumber != lastEventNumber + 1) {
		if (Debugger.IsAttached)
			Debugger.Break();
		else
			throw new Exception(
				string.Format("Commit invariant violation: new event number {0} doesn't correspond to current stream version {1}.\n" +
					"Stream ID: {2}.\nCommit: {3}.", newEventNumber, lastEventNumber, streamId, commit));
	}
}
// Pre-commit pass over a commit's transaction (int event-number variant):
// registers each contributing prepare in the committed-events cache, tracks
// the resulting last event number per stream, and - for metastreams - caches
// the raw metadata of the last prepare. Enforces that all prepares target
// the same stream.
public void PreCommit(CommitLogRecord commit) {
	string streamId = null;
	int eventNumber = int.MinValue;
	PrepareLogRecord lastPrepare = null;
	foreach (var prepare in GetTransactionPrepares(commit.TransactionPosition, commit.LogPosition)) {
		// Empty prepares (bare TransactionBegin/End) contribute nothing.
		if (prepare.Flags.HasNoneOf(PrepareFlags.StreamDelete | PrepareFlags.Data))
			continue;
		if (streamId == null)
			streamId = prepare.EventStreamId;
		if (prepare.EventStreamId != streamId)
			throw new Exception(string.Format("Expected stream: {0}, actual: {1}.", streamId, prepare.EventStreamId));
		eventNumber = prepare.Flags.HasAnyOf(PrepareFlags.StreamDelete)
			? EventNumber.DeletedStream
			: commit.FirstEventNumber + prepare.TransactionOffset;
		lastPrepare = prepare;
		_committedEvents.PutRecord(prepare.EventId, new EventInfo(streamId, eventNumber), throwOnDuplicate: false);
	}
	if (eventNumber != int.MinValue)
		_streamVersions.Put(streamId, eventNumber, +1);
	if (lastPrepare != null && SystemStreams.IsMetastream(streamId)) {
		// Metastream writes carry the stream's raw metadata in the prepare body.
		var rawMeta = lastPrepare.Data;
		_streamRawMetas.Put(SystemStreams.OriginalStreamOf(streamId), new StreamMeta(rawMeta, null), +1);
	}
}
/// <summary>
/// Creates a pending transaction for which only the commit record is supplied
/// (counterpart of the overload that also accepts the transaction's prepares).
/// </summary>
/// <param name="transactionPosition">Log position where the transaction starts.</param>
/// <param name="postPosition">Log position immediately after the transaction.</param>
/// <param name="commit">The commit record of the transaction.</param>
public PendingTransaction(long transactionPosition, long postPosition, CommitLogRecord commit)
{
    _commit = commit;
    TransactionPosition = transactionPosition;
    PostPosition = postPosition;
}
// Guards against indexing the same event number twice for a stream: scans the table
// index over the version range about to be added and, if an already-indexed record for
// the same stream exists at any of those versions, breaks into the debugger (when
// attached) or throws. Hash collisions are filtered out via the stream-id comparison.
private void CheckDuplicateEvents(uint streamHash, CommitLogRecord commit, IList<IndexEntry> indexEntries, IList<PrepareLogRecord> prepares)
{
    var reader = _readers.Get();
    try
    {
        var firstVersion = indexEntries[0].Version;
        var lastVersion = indexEntries[indexEntries.Count - 1].Version;

        foreach (var entry in _tableIndex.GetRange(streamHash, firstVersion, lastVersion))
        {
            var existing = GetEventRecord(reader, entry);
            var incoming = prepares[entry.Version - firstVersion];

            // Not found, or a different stream that merely shares the hash: not a duplicate.
            if (!existing.Success || existing.Record.EventStreamId != incoming.EventStreamId)
                continue;

            if (Debugger.IsAttached)
            {
                Debugger.Break();
            }
            else
            {
                throw new Exception(string.Format(
                    "Trying to add duplicate event #{0} to stream {1} (hash {2})\nCommit: {3}\nPrepare: {4}\nPresent record: {5}.",
                    entry.Version, incoming.EventStreamId, streamHash, commit, incoming, existing.Record));
            }
        }
    }
    finally
    {
        _readers.Return(reader); // always hand the borrowed reader back to the pool
    }
}
/// <summary>Attaches (or replaces) the commit record of this pending transaction.</summary>
/// <param name="commit">The commit record to associate with the transaction.</param>
public void SetPendingCommit(CommitLogRecord commit) => _commit = commit;
// Applies a commit record to the read index: resolves the transaction's prepares,
// builds index entries for data/delete prepares, bulk-adds them to the table index,
// publishes EventCommited messages, and advances _lastCommitPosition via CAS.
// NOTE(review): assumes all prepares of one transaction target a single stream
// (enforced only by Debug.Assert below) — confirm against the write path.
public void Commit(CommitLogRecord commit)
{
    // Snapshot of the last applied commit position; reused for the CAS at the end
    // so concurrent modification during this call is detected.
    var lastCommitPosition = Interlocked.Read(ref _lastCommitPosition);

    // Idempotency guard: positions at or before the snapshot were already applied
    // (equal position is re-processed only during index rebuild).
    if (commit.LogPosition < lastCommitPosition || (commit.LogPosition == lastCommitPosition && !_indexRebuild))
        return; // already committed

    bool first = true;           // true until the first prepare fixes streamId/streamHash
    int eventNumber = -1;        // last event number assigned in the loop below
    uint streamHash = 0;
    string streamId = null;
    var indexEntries = new List<IndexEntry>();
    var prepares = new List<PrepareLogRecord>();

    foreach (var prepare in GetTransactionPrepares(commit.TransactionPosition, commit.LogPosition))
    {
        if (first)
        {
            streamHash = _hasher.Hash(prepare.EventStreamId);
            streamId = prepare.EventStreamId;
            first = false;
        }
        else
            Debug.Assert(prepare.EventStreamId == streamId);

        bool addToIndex = false;
        if ((prepare.Flags & PrepareFlags.StreamDelete) != 0)
        {
            // Stream deletion: record under the sentinel "deleted" event number.
            eventNumber = EventNumber.DeletedStream;
            _committedEvents.PutRecord(prepare.EventId, Tuple.Create(streamId, eventNumber), throwOnDuplicate: false);
            // Index only records past the persisted checkpoints (older ones are
            // already in the PTables from a previous run).
            addToIndex = commit.LogPosition > _persistedCommitCheckpoint
                         || commit.LogPosition == _persistedCommitCheckpoint && prepare.LogPosition > _persistedPrepareCheckpoint;
        }
        else if ((prepare.Flags & PrepareFlags.Data) != 0)
        {
            // Data prepare: event number is the commit's first number offset by the
            // prepare's position within the transaction.
            eventNumber = commit.FirstEventNumber + prepare.TransactionOffset;
            _committedEvents.PutRecord(prepare.EventId, Tuple.Create(streamId, eventNumber), throwOnDuplicate: false);
            addToIndex = commit.LogPosition > _persistedCommitCheckpoint
                         || commit.LogPosition == _persistedCommitCheckpoint && prepare.LogPosition > _persistedPrepareCheckpoint;
        }

        // could be just empty prepares for TransactionBegin and TransactionEnd, for instance
        // or records which are rebuilt but are already in PTables
        if (addToIndex)
        {
#if CHECK_COMMIT_DUPLICATES
            // Optional (compile-time) duplicate detection: fail loudly if this
            // (stream, eventNumber) already resolves to a readable event.
            long pos;
            if (_tableIndex.TryGetOneValue(streamHash, eventNumber, out pos))
            {
                var res = ((IReadIndex)this).ReadEvent(streamId, eventNumber);
                if (res.Result == ReadEventResult.Success)
                {
                    Debugger.Break();
                    throw new Exception(
                        string.Format(
                            "Trying to add duplicate event #{0} for stream {1}(hash {2})\nCommit: {3}\nPrepare: {4}.",
                            eventNumber, streamId, streamHash, commit, prepare));
                }
            }
#endif
            indexEntries.Add(new IndexEntry(streamHash, eventNumber, prepare.LogPosition));
            prepares.Add(prepare);
        }
    }

    if (indexEntries.Count > 0)
    {
        _tableIndex.AddEntries(commit.LogPosition, indexEntries); // atomically add a whole bulk of entries
        for (int i = 0, n = indexEntries.Count; i < n; ++i)
        {
            _bus.Publish(new StorageMessage.EventCommited(commit.LogPosition, new EventRecord(indexEntries[i].Version, prepares[i])));
        }
    }

    // Advance the last-commit position; the CAS fails only if another thread moved it
    // since our snapshot, which callers treat as a fatal concurrency violation.
    var newLastCommitPosition = commit.LogPosition > lastCommitPosition ? commit.LogPosition : lastCommitPosition;
    if (Interlocked.CompareExchange(ref _lastCommitPosition, newLastCommitPosition, lastCommitPosition) != lastCommitPosition)
        throw new Exception("Concurrency error in ReadIndex.Commit: _lastCommitPosition was modified during Commit execution!");

    if (first)
    {
        // we got here because all prepares of this commit was scavenged,
        // so we don't add anything to cache, to table index, anywhere
        // we just pretend this commit was already processed and scavenged :)
        return;
    }

    // Cache the stream's latest event number; on update keep the previously cached metadata.
    _streamInfoCache.Put(streamId,
                         key => new StreamCacheInfo(eventNumber, null),
                         (key, old) => new StreamCacheInfo(eventNumber, old.Metadata));
}