// Verifies the prepare record written by the fixture round-trips through the chaser.
public void the_data_is_written() {
	//TODO MAKE THIS ACTUALLY ASSERT OFF THE FILE AND READER FROM KNOWN FILE
	using (var reader = new TFChunkChaser(_db, _writerCheckpoint, _db.Config.ChaserCheckpoint, false)) {
		reader.Open();
		LogRecord r;
		Assert.IsTrue(reader.TryReadNext(out r));

		Assert.True(r is PrepareLogRecord);
		var p = (PrepareLogRecord)r;
		// NUnit's Assert.AreEqual takes (expected, actual); the original had the
		// arguments reversed, which makes failure messages misleading.
		Assert.AreEqual(LogRecordType.Prepare, p.RecordType);
		Assert.AreEqual(0, p.LogPosition);
		Assert.AreEqual(0xDEAD, p.TransactionPosition);
		Assert.AreEqual(0xBEEF, p.TransactionOffset);
		Assert.AreEqual(_correlationId, p.CorrelationId);
		Assert.AreEqual(_eventId, p.EventId);
		Assert.AreEqual("WorldEnding", p.EventStreamId);
		Assert.AreEqual(1234, p.ExpectedVersion);
		Assert.AreEqual(new DateTime(2012, 12, 21), p.TimeStamp);
		Assert.AreEqual(PrepareFlags.SingleWrite, p.Flags);
		Assert.AreEqual("type", p.EventType);
		Assert.AreEqual(5, p.Data.Length);
		Assert.AreEqual(2, p.Metadata.Length);
	}
}
// Reading from a db whose writer checkpoint is still at zero must yield nothing.
public void try_read_returns_false_when_writer_checkpoint_is_zero() {
	var writerchk = new InMemoryCheckpoint(0);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		10000,
		0,
		writerchk,
		new InMemoryCheckpoint(),
		new InMemoryCheckpoint(-1),
		new InMemoryCheckpoint(-1)));
	db.Open();
	var chaser = new TFChunkChaser(db, writerchk, new InMemoryCheckpoint());
	chaser.Open();
	try {
		// Writer checkpoint is 0, so the chaser has nothing to read.
		LogRecord record;
		Assert.IsFalse(chaser.TryReadNext(out record));
	} finally {
		// The original leaked the chaser and db if the assertion threw;
		// always release them so later tests see a clean directory.
		chaser.Close();
		db.Dispose();
	}
}
// Verifies the prepare record written by the fixture round-trips through the
// chaser for the log-format under test (stream and event-type ids come from
// LogFormatHelper, so the same test covers V2/V3 formats).
public void the_data_is_written() {
	//TODO MAKE THIS ACTUALLY ASSERT OFF THE FILE AND READER FROM KNOWN FILE
	using (var reader = new TFChunkChaser(_db, _writerCheckpoint, _db.Config.ChaserCheckpoint, false)) {
		reader.Open();
		ILogRecord r;
		Assert.IsTrue(reader.TryReadNext(out r));

		var streamId = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
		var eventTypeId = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;

		Assert.True(r is IPrepareLogRecord<TStreamId>);
		var p = (IPrepareLogRecord<TStreamId>)r;
		// NUnit's Assert.AreEqual takes (expected, actual); the original had the
		// arguments reversed, which makes failure messages misleading.
		Assert.AreEqual(LogRecordType.Prepare, p.RecordType);
		Assert.AreEqual(0, p.LogPosition);
		Assert.AreEqual(0xDEAD, p.TransactionPosition);
		Assert.AreEqual(0xBEEF, p.TransactionOffset);
		Assert.AreEqual(_correlationId, p.CorrelationId);
		Assert.AreEqual(_eventId, p.EventId);
		Assert.AreEqual(streamId, p.EventStreamId);
		Assert.AreEqual(1234, p.ExpectedVersion);
		// Timestamp tolerance: some formats round the timestamp on write.
		Assert.That(p.TimeStamp, Is.EqualTo(new DateTime(2012, 12, 21)).Within(7).Milliseconds);
		Assert.AreEqual(PrepareFlags.SingleWrite, p.Flags);
		Assert.AreEqual(eventTypeId, p.EventType);
		Assert.AreEqual(5, p.Data.Length);
		Assert.AreEqual(2, p.Metadata.Length);
	}
}
// When writer and chaser checkpoints are at the same position the chaser has
// caught up and must report nothing to read.
public void try_read_returns_false_when_writer_checksum_is_equal_to_reader_checksum() {
	var writerchk = new InMemoryCheckpoint();
	var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new PrefixFileNamingStrategy(PathName, "prefix.tf"),
		10000,
		0,
		writerchk,
		chaserchk,
		new[] { writerchk, chaserchk }));
	db.OpenVerifyAndClean();
	// Put both checkpoints at the same (arbitrary) position.
	writerchk.Write(12);
	writerchk.Flush();
	chaserchk.Write(12);
	chaserchk.Flush();
	var chaser = new TFChunkChaser(db, writerchk, chaserchk);
	chaser.Open();
	try {
		LogRecord record;
		Assert.IsFalse(chaser.TryReadNext(out record));
		// The failed read must not move the chaser checkpoint.
		Assert.AreEqual(12, chaserchk.Read());
	} finally {
		// The original leaked the chaser and db if an assertion threw;
		// always release them so later tests see a clean directory.
		chaser.Close();
		db.Dispose();
	}
}
// Enumerates all prepare records belonging to the transaction that starts at
// `transactionBeginPos`, in log order, ending with the TransactionEnd prepare.
// Throws InvalidOperationException when a record that is supposed to exist
// cannot be read.
// NOTE(review): the chasing loop below has no upper bound — if the
// TransactionEnd prepare is missing from the log this will chase until a read
// fails; verify callers only pass positions of complete transactions. The
// TFChunkChaser created here is also never closed — confirm that is acceptable
// at this code version.
private IEnumerable <PrepareLogRecord> GetTransactionPrepares(long transactionBeginPos) {
	// Read the first record of the transaction with a pooled reader; the
	// reader is returned to the pool before anything is yielded to the caller.
	var reader = GetReader();
	RecordReadResult result;
	try {
		result = reader.TryReadAt(transactionBeginPos);
	} finally {
		ReturnReader(reader);
		reader = null;
	}
	if (!result.Success) {
		throw new InvalidOperationException("Couldn't read record which is supposed to be in file.");
	}
	Debug.Assert(result.LogRecord.RecordType == LogRecordType.Prepare, "Incorrect type of log record, expected Prepare record.");
	var transactionRecord = (PrepareLogRecord)result.LogRecord;
	// Single-prepare (implicit) transaction: the first prepare is also the last.
	if ((transactionRecord.Flags & PrepareFlags.TransactionEnd) != 0) {
		yield return(transactionRecord);
		yield break;
	}
	// Multi-prepare transaction: chase forward from the transaction start,
	// yielding every prepare that matches this transaction's position and
	// stream id, until the TransactionEnd prepare is found.
	var chaser = new TFChunkChaser(_db, _writerCheckpoint, new InMemoryCheckpoint(transactionBeginPos));
	while (true) {
		result = chaser.TryReadNext();
		if (!result.Success) {
			throw new InvalidOperationException("Couldn't read record which is supposed to be in file.");
		}
		var prepare = result.LogRecord as PrepareLogRecord;
		// Records interleaved from other transactions/streams are skipped.
		if (prepare != null && prepare.TransactionPosition == transactionBeginPos && prepare.EventStreamId == transactionRecord.EventStreamId) {
			yield return(prepare);
			if ((prepare.Flags & PrepareFlags.TransactionEnd) != 0) {
				yield break;
			}
		}
	}
}
// The chaser must return the record even when the writer checkpoint is
// already positioned beyond it.
public void try_read_returns_record_when_writerchecksum_ahead() {
	// The prepare we expect the chaser to hand back.
	var expected = new PrepareLogRecord(logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPosition: 0,
		transactionOffset: 0,
		eventStreamId: "WorldEnding",
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[] { 7, 17 });

	// Lay out a chunk file by hand: header followed by the framed record.
	using (var fs = new FileStream(GetFilePathFor("chunk-000000.000000"), FileMode.CreateNew, FileAccess.Write)) {
		fs.SetLength(ChunkHeader.Size + ChunkFooter.Size + 10000);
		var binaryWriter = new BinaryWriter(fs);
		binaryWriter.Write(new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid()).AsByteArray());
		expected.WriteWithLengthPrefixAndSuffixTo(binaryWriter);
		fs.Close();
	}

	// Writer checkpoint sits well past the record; chaser starts at zero.
	var writerchk = new InMemoryCheckpoint(128);
	var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		10000,
		0,
		writerchk,
		chaserchk,
		new InMemoryCheckpoint(-1),
		new InMemoryCheckpoint(-1)));
	db.Open();

	var chaser = new TFChunkChaser(db, writerchk, chaserchk);
	chaser.Open();
	LogRecord actual;
	var wasRead = chaser.TryReadNext(out actual);
	chaser.Close();

	Assert.AreEqual(actual.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
	Assert.IsTrue(wasRead);
	Assert.AreEqual(expected, actual);
	db.Close();
}
// A record larger than the chaser's internal read buffer must still round-trip.
public void try_read_returns_record_when_record_bigger_than_internal_buffer() {
	var writerCheckpoint = new InMemoryCheckpoint(0);
	var chaserCheckpoint = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		10000,
		0,
		writerCheckpoint,
		chaserCheckpoint,
		new InMemoryCheckpoint(-1),
		new InMemoryCheckpoint(-1)));
	db.Open();

	// A 9000-byte payload exceeds the chaser's internal buffer.
	var expected = new PrepareLogRecord(logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPosition: 0,
		transactionOffset: 0,
		eventStreamId: "WorldEnding",
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[9000],
		metadata: new byte[] { 7, 17 });

	var writer = new TFChunkWriter(db);
	writer.Open();
	long newPos;
	Assert.IsTrue(writer.Write(expected, out newPos));
	writer.Close();
	writerCheckpoint.Write(expected.GetSizeWithLengthPrefixAndSuffix());

	var chaser = new TFChunkChaser(db, writerCheckpoint, chaserCheckpoint);
	chaser.Open();
	LogRecord actual;
	var wasRead = chaser.TryReadNext(out actual);
	chaser.Close();

	Assert.IsTrue(wasRead);
	Assert.AreEqual(actual.GetSizeWithLengthPrefixAndSuffix(), chaserCheckpoint.Read());
	Assert.AreEqual(expected, actual);
	db.Close();
}
// The chaser must return the record even when the writer checkpoint is
// already positioned beyond it.
public void try_read_returns_record_when_writerchecksum_ahead() {
	var factory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
	var stream = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
	var eventType = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;

	// The prepare we expect the chaser to hand back.
	var expected = LogRecord.Prepare(
		factory: factory,
		logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPos: 0,
		transactionOffset: 0,
		eventStreamId: stream,
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: eventType,
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[] { 7, 17 });

	// Lay out a chunk file by hand: header followed by the framed record.
	using (var fs = new FileStream(GetFilePathFor("chunk-000000.000000"), FileMode.CreateNew, FileAccess.Write)) {
		fs.SetLength(ChunkHeader.Size + ChunkFooter.Size + 10000);
		var binaryWriter = new BinaryWriter(fs);
		binaryWriter.Write(new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid()).AsByteArray());
		expected.WriteWithLengthPrefixAndSuffixTo(binaryWriter);
		fs.Close();
	}

	// Writer checkpoint sits past the record; chaser starts at zero.
	var writerchk = new InMemoryCheckpoint(expected.GetSizeWithLengthPrefixAndSuffix() + 16);
	var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
	var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));
	db.Open();

	var chaser = new TFChunkChaser(db, writerchk, chaserchk, false);
	chaser.Open();
	ILogRecord actual;
	var wasRead = chaser.TryReadNext(out actual);
	chaser.Close();

	Assert.AreEqual(actual.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
	Assert.IsTrue(wasRead);
	Assert.AreEqual(expected, actual);
	db.Close();
}
// A record larger than the chaser's internal read buffer must still round-trip.
public void try_read_returns_record_when_record_bigger_than_internal_buffer() {
	var writerCheckpoint = new InMemoryCheckpoint(0);
	var chaserCheckpoint = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
	var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerCheckpoint, chaserCheckpoint));
	db.Open();

	var factory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
	var stream = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
	var eventType = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;

	// A 9000-byte payload exceeds the chaser's internal buffer.
	var expected = LogRecord.Prepare(
		factory: factory,
		logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPos: 0,
		transactionOffset: 0,
		eventStreamId: stream,
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: eventType,
		data: new byte[9000],
		metadata: new byte[] { 7, 17 });

	var writer = new TFChunkWriter(db);
	writer.Open();
	long newPos;
	Assert.IsTrue(writer.Write(expected, out newPos));
	writer.Close();
	writerCheckpoint.Write(expected.GetSizeWithLengthPrefixAndSuffix());

	var chaser = new TFChunkChaser(db, writerCheckpoint, chaserCheckpoint, false);
	chaser.Open();
	ILogRecord actual;
	var wasRead = chaser.TryReadNext(out actual);
	chaser.Close();

	Assert.IsTrue(wasRead);
	Assert.AreEqual(actual.GetSizeWithLengthPrefixAndSuffix(), chaserCheckpoint.Read());
	Assert.AreEqual(expected, actual);
	db.Close();
}
// When the writer checkpoint sits exactly at the end of the record,
// the chaser must read it and advance its own checkpoint to the same point.
public void try_read_returns_record_when_writerchecksum_equal() {
	var writerCheckpoint = new InMemoryCheckpoint(0);
	var chaserCheckpoint = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new PrefixFileNamingStrategy(PathName, "prefix.tf"),
		10000,
		0,
		writerCheckpoint,
		new[] { chaserCheckpoint }));
	db.OpenVerifyAndClean();

	var expected = new PrepareLogRecord(logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPosition: 0,
		eventStreamId: "WorldEnding",
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[] { 7, 17 });

	var writer = new TFChunkWriter(db);
	writer.Open();
	long newPos;
	Assert.IsTrue(writer.Write(expected, out newPos));
	writer.Close();
	// Advance the writer checkpoint to exactly the end of the record.
	writerCheckpoint.Write(expected.GetSizeWithLengthPrefix());

	var chaser = new TFChunkChaser(db, writerCheckpoint, chaserCheckpoint);
	chaser.Open();
	LogRecord actual;
	var wasRead = chaser.TryReadNext(out actual);
	chaser.Close();

	Assert.IsTrue(wasRead);
	Assert.AreEqual(actual.GetSizeWithLengthPrefix(), chaserCheckpoint.Read());
	Assert.AreEqual(expected, actual);
	db.Close();
}
// When the writer checkpoint sits exactly at the end of the record,
// the chaser must read it and advance its own checkpoint to the same point.
public void try_read_returns_record_when_writerchecksum_equal() {
	var writerCheckpoint = new InMemoryCheckpoint(0);
	var chaserCheckpoint = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
	var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerCheckpoint, chaserCheckpoint));
	db.Open();

	_logFormat.StreamNameIndex.GetOrAddId("WorldEnding", out var streamId, out _, out _);
	var expected = LogRecord.Prepare(
		factory: _logFormat.RecordFactory,
		logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPos: 0,
		transactionOffset: 0,
		eventStreamId: streamId,
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[] { 7, 17 });

	var writer = new TFChunkWriter(db);
	writer.Open();
	long newPos;
	Assert.IsTrue(writer.Write(expected, out newPos));
	writer.Close();
	// Advance the writer checkpoint to exactly the end of the record.
	writerCheckpoint.Write(expected.GetSizeWithLengthPrefixAndSuffix());

	var chaser = new TFChunkChaser(db, writerCheckpoint, chaserCheckpoint, false);
	chaser.Open();
	ILogRecord actual;
	var wasRead = chaser.TryReadNext(out actual);
	chaser.Close();

	Assert.IsTrue(wasRead);
	Assert.AreEqual(actual.GetSizeWithLengthPrefixAndSuffix(), chaserCheckpoint.Read());
	Assert.AreEqual(expected, actual);
	db.Close();
}
// Verifies the commit record written by the fixture round-trips through the chaser.
public void the_data_is_written() {
	using (var reader = new TFChunkChaser(_db, _writerCheckpoint, _db.Config.ChaserCheckpoint, false)) {
		reader.Open();
		ILogRecord r;
		Assert.IsTrue(reader.TryReadNext(out r));

		Assert.True(r is CommitLogRecord);
		var c = (CommitLogRecord)r;
		// NUnit's Assert.AreEqual takes (expected, actual); the original had the
		// arguments reversed, which makes failure messages misleading.
		Assert.AreEqual(LogRecordType.Commit, c.RecordType);
		Assert.AreEqual(0, c.LogPosition);
		// NOTE(review): the commit's correlation id is compared against _eventId
		// here — presumably the fixture commits with that id; verify against setup.
		Assert.AreEqual(_eventId, c.CorrelationId);
		Assert.AreEqual(4321, c.TransactionPosition);
		Assert.AreEqual(new DateTime(2012, 12, 21), c.TimeStamp);
	}
}
// Rebuilds the read index from the transaction file, starting at the last
// persisted commit checkpoint and replaying every commit record from there.
public void Build() {
	// Prime the table index and capture the checkpoints it was persisted at.
	_tableIndex.Initialize();
	_persistedPrepareCheckpoint = _tableIndex.PrepareCheckpoint;
	_persistedCommitCheckpoint = _tableIndex.CommitCheckpoint.ReadNonFlushed();

	foreach (var rdr in _readers)
		rdr.Open();

	var startPosition = Math.Max(0, _persistedCommitCheckpoint);
	var chaser = new TFChunkChaser(_db, _writerCheckpoint, new InMemoryCheckpoint(startPosition));

	long processed = 0;
	while (true) {
		var readResult = chaser.TryReadNext();
		if (!readResult.Success)
			break;

		var recordType = readResult.LogRecord.RecordType;
		if (recordType == LogRecordType.Commit)
			Commit((CommitLogRecord)readResult.LogRecord);
		else if (recordType != LogRecordType.Prepare)
			throw new ArgumentOutOfRangeException();
		// Prepares are intentionally skipped: the index is rebuilt from commits only.

		processed += 1;
		if (processed % 100000 == 0)
			Log.Debug("ReadIndex Rebuilding: processed {0} records.", processed);
	}
}
// Reading from a db whose writer checkpoint is still at zero must yield nothing.
public void try_read_returns_false_when_writer_checkpoint_is_zero() {
	var writerchk = new InMemoryCheckpoint(0);
	var chaserchk = new InMemoryCheckpoint();
	var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));
	db.Open();
	// Pass the same chaser checkpoint the db was configured with; the original
	// constructed a second, unrelated InMemoryCheckpoint here, leaving
	// `chaserchk` disconnected from the chaser under test.
	var chaser = new TFChunkChaser(db, writerchk, chaserchk, false);
	chaser.Open();
	try {
		ILogRecord record;
		Assert.IsFalse(chaser.TryReadNext(out record));
	} finally {
		// Always release resources even when the assertion throws.
		chaser.Close();
		db.Dispose();
	}
}
// A freshly created db whose writer has not advanced past zero contains no records.
public void try_read_returns_false_when_writer_checkpoint_is_zero() {
	var writerCheckpoint = new InMemoryCheckpoint(0);
	var config = new TFChunkDbConfig(PathName,
		new PrefixFileNamingStrategy(PathName, "prefix.tf"),
		10000,
		0,
		writerCheckpoint,
		new ICheckpoint[0]);
	var db = new TFChunkDb(config);
	db.OpenVerifyAndClean();

	var chaser = new TFChunkChaser(db, writerCheckpoint, new InMemoryCheckpoint());
	chaser.Open();

	LogRecord record;
	var wasRead = chaser.TryReadNext(out record);
	Assert.IsFalse(wasRead);

	chaser.Close();
	db.Dispose();
}
// When writer and chaser checkpoints are at the same position the chaser has
// caught up and must report nothing to read.
public void try_read_returns_false_when_writer_checksum_is_equal_to_reader_checksum() {
	var writerchk = new InMemoryCheckpoint();
	var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
	var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));
	db.Open();
	// Put both checkpoints at the same (arbitrary) position.
	writerchk.Write(12);
	writerchk.Flush();
	chaserchk.Write(12);
	chaserchk.Flush();
	var chaser = new TFChunkChaser(db, writerchk, chaserchk, false);
	chaser.Open();
	try {
		ILogRecord record;
		Assert.IsFalse(chaser.TryReadNext(out record));
		// The failed read must not move the chaser checkpoint.
		Assert.AreEqual(12, chaserchk.Read());
	} finally {
		// The original leaked the chaser and db if an assertion threw;
		// always release them so later tests see a clean directory.
		chaser.Close();
		db.Dispose();
	}
}
// Scans the transaction file forward from `fromCommitPosition`, collecting up
// to `maxCount` committed events. For every commit record found, a second
// chaser replays that transaction's prepares (starting at the commit's
// TransactionPosition); each prepare whose LogPosition is strictly greater
// than `afterPreparePosition` is emitted, with link events optionally resolved
// to their targets. Throws if events come back out of (commit, prepare) order.
// NOTE(review): the two TFChunkChaser instances are never closed, and when
// `maxCount` is hit mid-transaction the inner loop exits with the transaction
// partially consumed — presumably callers resume from the returned positions;
// verify against call sites.
private List <ResolvedEventRecord> ReadEventsFromTFInternal(long fromCommitPosition, long afterPreparePosition, int maxCount, bool resolveLinks) {
	var records = new List <ResolvedEventRecord>();
	long lastAddedCommit = 0;   // commit position of the most recently emitted event
	long lastAddedPrepare = -1; // prepare position of the most recently emitted event
	var count = 0;              // events emitted so far, capped at maxCount
	var chaser = new TFChunkChaser(_db, _writerCheckpoint, new InMemoryCheckpoint(fromCommitPosition));
	while (count < maxCount) {
		var result = chaser.TryReadNext();
		// Skip until a commit, as we may start from just the last known prepare position.
		while (result.Success && result.LogRecord.RecordType != LogRecordType.Commit) {
			result = chaser.TryReadNext();
		}
		if (!result.Success) {
			// End of the written log — no more commits to process.
			break;
		}
		var commitLogRecord = (CommitLogRecord)result.LogRecord;
		// Replay this transaction's prepares from the transaction start position.
		var commitChaser = new TFChunkChaser(_db, _writerCheckpoint, new InMemoryCheckpoint(commitLogRecord.TransactionPosition));
		long transactionPosition = commitLogRecord.TransactionPosition;
		int nextEventNumber = commitLogRecord.EventNumber; // event numbers run sequentially from the commit's first number
		while (count < maxCount) {
			result = commitChaser.TryReadNext();
			if (!result.Success) {
				throw new Exception(string.Format("Cannot read TF at position."));
			}
			if (result.LogRecord.RecordType != LogRecordType.Prepare) {
				// Non-prepare records interleaved in the range are ignored.
				continue;
			}
			var prepareRecord = (PrepareLogRecord)result.LogRecord;
			// Only prepares belonging to this exact transaction count.
			if (prepareRecord.TransactionPosition == transactionPosition) {
				if (prepareRecord.LogPosition > afterPreparePosition) { // AFTER means strictly greater
					// Sanity check: events must come back in ascending (commit, prepare) order.
					if (commitLogRecord.Position < lastAddedCommit || commitLogRecord.Position == lastAddedCommit && prepareRecord.Position <= lastAddedPrepare) {
						throw new Exception(string.Format(
							"events were read in invalid order. Last event position was {0}/{1}. " +
							"Attempt to add event with position: {2}/{3}",
							lastAddedCommit,
							lastAddedPrepare,
							commitLogRecord.Position,
							prepareRecord.Position));
					}
					lastAddedCommit = commitLogRecord.Position;
					lastAddedPrepare = prepareRecord.Position;
					var eventRecord = new EventRecord(nextEventNumber, prepareRecord);
					EventRecord linkToEvent = null;
					if (resolveLinks) {
						// If the event is a link and resolves, emit the target and keep the link alongside it.
						var resolved = ResolveLinkToEvent(eventRecord);
						if (resolved != null) {
							linkToEvent = eventRecord;
							eventRecord = resolved;
						}
					}
					records.Add(new ResolvedEventRecord(eventRecord, linkToEvent, commitLogRecord.Position));
					count++;
				}
				// Advances even for filtered-out prepares so numbering stays consistent.
				nextEventNumber++;
				if ((prepareRecord.Flags & PrepareFlags.TransactionEnd) != 0) {
					break; // transaction fully consumed
				}
			}
		}
	}
	return(records);
}