public void try_read_does_not_cache_anything_and_returns_record_once_it_is_written_later()
{
    var writerchk = new InMemoryCheckpoint(0);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, writerchk, new ICheckpoint[0]);

    // create db
    var writer = new MultifileTransactionFileWriter(config);
    writer.Open();

    var reader = new MultifileTransactionFileChaser(config);
    reader.Open();

    // nothing has been written yet, so the chaser has nothing to read
    LogRecord record;
    Assert.IsFalse(reader.TryReadNext(out record));

    var rec = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "ES", -1, "ET", new byte[] { 7 }, null);
    long tmp;
    Assert.IsTrue(writer.Write(rec, out tmp));
    writer.Flush();
    writer.Close();

    // once the writer has flushed and advanced its checkpoint, the chaser sees the record
    Assert.IsTrue(reader.TryReadNext(out record));
    Assert.AreEqual(rec, record);

    reader.Close();
}
public void with_not_enough_files_to_reach_checksum_throws()
{
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, new InMemoryCheckpoint(15000), new List<ICheckpoint>());
    File.WriteAllBytes(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(0)), new byte[10000]);

    var validator = new TransactionFileDatabaseValidator(config);
    Assert.Throws<CorruptDatabaseException>(validator.Validate);
}
public void try_read_returns_record_when_writerchecksum_ahead()
{
    var writerchk = new InMemoryCheckpoint(128);
    var readerchk = new InMemoryCheckpoint("reader", 0);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, writerchk, new[] { readerchk });

    var recordToWrite = new PrepareLogRecord(logPosition: 0,
                                             correlationId: _correlationId,
                                             eventId: _eventId,
                                             transactionPosition: 0,
                                             eventStreamId: "WorldEnding",
                                             expectedVersion: 1234,
                                             timeStamp: new DateTime(2012, 12, 21),
                                             flags: PrepareFlags.None,
                                             eventType: "type",
                                             data: new byte[] { 1, 2, 3, 4, 5 },
                                             metadata: new byte[] { 7, 17 });

    using (var fs = new FileStream(Path.Combine(PathName, "prefix.tf0"), FileMode.CreateNew, FileAccess.Write))
    {
        var writer = new BinaryWriter(fs);
        recordToWrite.WriteWithLengthPrefixTo(writer);
        fs.Close();
    }

    var reader = new MultifileTransactionFileChaser(config, "reader");
    reader.Open();
    LogRecord record = null;
    var recordRead = reader.TryReadNext(out record);
    reader.Close();

    Assert.AreEqual(record.GetSizeWithLengthPrefix(), readerchk.Read());
    Assert.IsTrue(recordRead);
    Assert.AreEqual(recordToWrite, record);
}
public void with_file_of_wrong_size_higher_than_checksum_the_file_is_deleted()
{
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, new InMemoryCheckpoint(500), new List<ICheckpoint>());
    File.WriteAllText(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(0)), "this is just some test blahbydy blah");

    var validator = new TransactionFileDatabaseValidator(config);
    var ex = Assert.Throws<CorruptDatabaseException>(validator.Validate);
    Assert.IsInstanceOf<BadChunkInDatabaseException>(ex.InnerException);
}
public void allows_with_exactly_enough_file_to_reach_checksum()
{
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, new InMemoryCheckpoint(10000), new List<ICheckpoint>());
    File.WriteAllBytes(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(0)), new byte[10000]);

    var validator = new TransactionFileDatabaseValidator(config);
    Assert.DoesNotThrow(validator.Validate);
}
public void with_wrong_size_file_less_than_checksum_throws()
{
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, new InMemoryCheckpoint(15000), new List<ICheckpoint>());
    File.WriteAllBytes(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(0)), new byte[10000]);
    File.WriteAllBytes(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(1)), new byte[9000]);

    var validator = new TransactionFileDatabaseValidator(config);
    var ex = Assert.Throws<CorruptDatabaseException>(validator.Validate);
    Assert.IsInstanceOf<BadChunkInDatabaseException>(ex.InnerException);
}
public void when_in_first_extraneous_files_throws_corrupt_database_exception()
{
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, new InMemoryCheckpoint(9000), new List<ICheckpoint>());
    File.WriteAllBytes(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(0)), new byte[10000]);
    File.WriteAllBytes(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(1)), new byte[10000]);

    var validator = new TransactionFileDatabaseValidator(config);
    var ex = Assert.Throws<CorruptDatabaseException>(validator.Validate);
    Assert.IsInstanceOf<ExtraneousFileFoundException>(ex.InnerException);
}
public void try_read_returns_false_when_writer_checksum_is_zero()
{
    var writerchk = new InMemoryCheckpoint(0);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, writerchk, new List<ICheckpoint>());
    File.WriteAllBytes(Path.Combine(PathName, "prefix.tf0"), new byte[10000]);

    var reader = new MultifileTransactionFileChaser(config);
    reader.Open();
    LogRecord record;
    Assert.IsFalse(reader.TryReadNext(out record));
    reader.Close();
}
public void try_read_returns_false_when_writer_checksum_is_equal_to_reader_checksum()
{
    var writerchk = new InMemoryCheckpoint(12);
    var readerchk = new InMemoryCheckpoint("reader", 12);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, writerchk, new[] { readerchk });
    File.WriteAllBytes(Path.Combine(PathName, "prefix.tf0"), new byte[10000]);

    var reader = new MultifileTransactionFileChaser(config, "reader");
    reader.Open();
    LogRecord record;
    Assert.IsFalse(reader.TryReadNext(out record));
    Assert.AreEqual(12, readerchk.Read());
    reader.Close();
}
public void try_read_returns_false_when_writer_checksum_is_zero()
{
    var writerchk = new InMemoryCheckpoint(0);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, writerchk, new ICheckpoint[0]);

    // create db
    var writer = new MultifileTransactionFileWriter(config);
    writer.Open();
    writer.Close();

    var reader = new MultifileTransactionFileChaser(config);
    reader.Open();
    LogRecord record;
    Assert.IsFalse(reader.TryReadNext(out record));
    reader.Close();
}
public void can_read_a_record_straddling_multiple_files()
{
    var writerchk = new InMemoryCheckpoint(20020);
    var readerchk = new InMemoryCheckpoint("reader", 9990);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, writerchk, new[] { readerchk });

    var recordToWrite = new PrepareLogRecord(logPosition: 0,
                                             correlationId: _correlationId,
                                             eventId: _eventId,
                                             transactionPosition: 0,
                                             transactionOffset: 0,
                                             eventStreamId: "WorldEnding",
                                             expectedVersion: 1234,
                                             timeStamp: new DateTime(2012, 12, 21),
                                             flags: PrepareFlags.None,
                                             eventType: "type",
                                             data: new byte[] { 1, 2, 3, 4, 5 },
                                             metadata: new byte[] { 7, 17 });

    var memstream = new MemoryStream();
    var writer = new BinaryWriter(memstream);
    recordToWrite.WriteWithLengthPrefixAndSuffixTo(writer);
    var buf = memstream.GetBuffer();

    // the first 10 bytes of the serialized record land at the end of the first file...
    using (var fs = new FileStream(config.FileNamingStrategy.GetFilenameFor(0), FileMode.CreateNew, FileAccess.Write))
    {
        fs.Seek(9990, SeekOrigin.Begin);
        fs.Write(buf, 0, 10);
        fs.Close();
    }
    // ...and the remainder at the start of the second file
    using (var fs = new FileStream(config.FileNamingStrategy.GetFilenameFor(1), FileMode.CreateNew, FileAccess.Write))
    {
        fs.Seek(0, SeekOrigin.Begin);
        fs.Write(buf, 10, recordToWrite.GetSizeWithLengthPrefixAndSuffix() - 10);
        fs.Close();
    }

    var reader = new MultifileTransactionFileChaser(config, readerchk);
    reader.Open();
    LogRecord record;
    var readRecord = reader.TryReadNext(out record);
    reader.Close();

    Assert.IsTrue(readRecord);
    Assert.That(recordToWrite, Is.EqualTo(record));
    Assert.AreEqual(9990 + recordToWrite.GetSizeWithLengthPrefixAndSuffix(), readerchk.Read());
}
public void should_not_read_buffered_data()
{
    var writerchk = new InMemoryCheckpoint(0);
    var readerchk = new InMemoryCheckpoint("reader", 0);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, writerchk, new List<ICheckpoint> { readerchk });

    var fileName = Path.Combine(PathName, "prefix.tf0");
    File.Create(fileName).Close();

    var reader = new MultifileTransactionFileBulkRetriever(config);
    reader.Open(0);

    // nothing should be returned while the writer checkpoint is still 0
    Assert.IsTrue(reader.ReadNextBulk().Length == 0);

    // append data and advance the writer checkpoint; only then should it become readable
    var bytes = new byte[100];
    new Random().NextBytes(bytes);
    using (var f = new FileStream(fileName, FileMode.Append, FileAccess.Write, FileShare.ReadWrite))
    {
        f.Write(bytes, 0, bytes.Length);
        f.Flush(flushToDisk: true);
    }
    writerchk.Write(bytes.Length);

    var readBytes = reader.ReadNextBulk();
    Assert.AreEqual(bytes, readBytes);

    // append a second batch and advance the writer checkpoint again
    bytes = new byte[100];
    new Random().NextBytes(bytes);
    using (var f = new FileStream(fileName, FileMode.Append, FileAccess.Write, FileShare.ReadWrite))
    {
        f.Write(bytes, 0, bytes.Length);
        f.Flush(flushToDisk: true);
    }
    writerchk.Write(writerchk.Read() + bytes.Length);

    readBytes = reader.ReadNextBulk();
    Assert.AreEqual(bytes, readBytes);

    reader.Close();
}
public TransactionFileDatabaseValidator(TransactionFileDatabaseConfig config)
{
    Ensure.NotNull(config, "config");
    _config = config;
}
public void a_null_checksum_result_in_argumentnullexception()
{
    var config = new TransactionFileDatabaseConfig("C:\\tmp", "prefix.tf", 10000, new InMemoryCheckpoint(), new List<ICheckpoint>());
    Assert.Throws<ArgumentNullException>(() => new MultifileTransactionFileChaser(config, null));
}
public void when_a_reader_checksum_is_ahead_of_writer_checksum_throws_corrupt_database_exception()
{
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, new InMemoryCheckpoint(0), new List<ICheckpoint> { new InMemoryCheckpoint(11) });
    File.WriteAllBytes(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(0)), new byte[10000]);

    var validator = new TransactionFileDatabaseValidator(config);
    var ex = Assert.Throws<CorruptDatabaseException>(validator.Validate);
    Assert.IsInstanceOf<ReaderCheckpointHigherThanWriterException>(ex.InnerException);
}
public void try_read_returns_properly_when_writer_is_written_to_while_chasing()
{
    var writerchk = new InMemoryCheckpoint(0);
    var readerchk = new InMemoryCheckpoint("reader", 0);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, writerchk, new[] { readerchk });

    var fileName = Path.Combine(PathName, "prefix.tf0");
    File.Create(fileName).Close();

    var reader = new MultifileTransactionFileChaser(config, "reader");
    reader.Open();
    LogRecord record;
    Assert.IsFalse(reader.TryReadNext(out record));

    var recordToWrite = new PrepareLogRecord(logPosition: 0,
                                             correlationId: _correlationId,
                                             eventId: _eventId,
                                             transactionPosition: 0,
                                             eventStreamId: "WorldEnding",
                                             expectedVersion: 1234,
                                             timeStamp: new DateTime(2012, 12, 21),
                                             flags: PrepareFlags.None,
                                             eventType: "type",
                                             data: new byte[] { 1, 2, 3, 4, 5 },
                                             metadata: new byte[] { 7, 17 });
    var memstream = new MemoryStream();
    var writer = new BinaryWriter(memstream);
    recordToWrite.WriteWithLengthPrefixTo(writer);

    // append the first record and advance the writer checkpoint
    using (var fs = new FileStream(fileName, FileMode.Append, FileAccess.Write, FileShare.ReadWrite))
    {
        fs.Write(memstream.ToArray(), 0, (int)memstream.Length);
        fs.Flush(flushToDisk: true);
    }
    writerchk.Write(memstream.Length);

    Assert.IsTrue(reader.TryReadNext(out record));
    Assert.AreEqual(record, recordToWrite);

    var recordToWrite2 = new PrepareLogRecord(logPosition: 0,
                                              correlationId: _correlationId,
                                              eventId: _eventId,
                                              transactionPosition: 0,
                                              eventStreamId: "WorldEnding",
                                              expectedVersion: 4321,
                                              timeStamp: new DateTime(2012, 12, 21),
                                              flags: PrepareFlags.None,
                                              eventType: "type",
                                              data: new byte[] { 3, 2, 1 },
                                              metadata: new byte[] { 9 });
    memstream.SetLength(0);
    recordToWrite2.WriteWithLengthPrefixTo(writer);

    // append a second record while the chaser is still open
    using (var fs = new FileStream(fileName, FileMode.Append, FileAccess.Write, FileShare.ReadWrite))
    {
        fs.Write(memstream.ToArray(), 0, (int)memstream.Length);
        fs.Flush(flushToDisk: true);
    }
    writerchk.Write(writerchk.Read() + memstream.Length);

    Assert.IsTrue(reader.TryReadNext(out record));
    Assert.AreEqual(record, recordToWrite2);

    reader.Close();
}