public void try_read_does_not_cache_anything_and_returns_record_once_it_is_written_later()
{
    // Arrange: an empty database with a sequential reader positioned at 0.
    var writerCheckpoint = new InMemoryCheckpoint(0);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                               10000,
                                               0,
                                               writerCheckpoint,
                                               new InMemoryCheckpoint()));
    db.OpenVerifyAndClean();

    var writer = new TFChunkWriter(db);
    writer.Open();

    var seqReader = new TFChunkSequentialReader(db, writerCheckpoint, 0);

    // Nothing has been written yet, so the first read must fail.
    LogRecord readRecord;
    Assert.IsFalse(seqReader.TryReadNext(out readRecord));

    // Act: write and flush a single record after the failed read.
    var written = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "ES", -1, "ET", new byte[] { 7 }, null);
    long newPosition;
    Assert.IsTrue(writer.Write(written, out newPosition));
    writer.Flush();
    writer.Close();

    // Assert: the SAME reader instance now sees the record, i.e. the
    // earlier "no data" outcome was not cached.
    Assert.IsTrue(seqReader.TryReadNext(out readRecord));
    Assert.AreEqual(written, readRecord);

    seqReader.Close();
    db.Close();
}
public void temporary_files_are_removed()
{
    // Arrange: two full chunks plus one unrelated file and two *.tmp leftovers.
    var config = new TFChunkDbConfig(PathName,
                                     new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                     100,
                                     0,
                                     new InMemoryCheckpoint(150),
                                     new InMemoryCheckpoint(),
                                     new ICheckpoint[0]);
    var db = new TFChunkDb(config);

    CreateChunk(Path.Combine(PathName, "chunk-000000.000000"), config.ChunkSize, config.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000001.000001"), config.ChunkSize, config.ChunkSize);

    File.Create(Path.Combine(PathName, "bla")).Close();
    File.Create(Path.Combine(PathName, "bla.scavenge.tmp")).Close();
    File.Create(Path.Combine(PathName, "bla.tmp")).Close();

    // Act: opening the database cleans up temporary files.
    Assert.DoesNotThrow(() => db.OpenVerifyAndClean(verifyHash: false));

    // Assert: chunks and the non-temp file survive; only the .tmp files are gone.
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000000.000000")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000001.000001")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "bla")));
    Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);

    db.Dispose();
}
public void when_checkpoint_is_exactly_on_the_boundary_of_chunk_the_last_chunk_could_be_present()
{
    // Checkpoint 200 with chunk size 100 sits exactly on the chunk-2 boundary;
    // an already-created (ongoing) chunk 2 is allowed to exist in that state.
    var config = new TFChunkDbConfig(PathName,
                                     new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                     100,
                                     0,
                                     new InMemoryCheckpoint(200),
                                     new InMemoryCheckpoint(),
                                     new ICheckpoint[0]);
    var db = new TFChunkDb(config);

    CreateChunk(Path.Combine(PathName, "chunk-000000.000000"), config.ChunkSize, config.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000001.000001"), config.ChunkSize, config.ChunkSize);
    CreateOngoingChunk(Path.Combine(PathName, "chunk-000002.000000"), config.ChunkSize, config.ChunkSize);

    Assert.DoesNotThrow(() => db.OpenVerifyAndClean(verifyHash: false));

    // The ongoing chunk must be loaded and all three files kept on disk.
    Assert.IsNotNull(db.Manager.GetChunk(2));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000000.000000")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000001.000001")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000002.000000")));
    Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);

    db.Dispose();
}
public void SetUp()
{
    // Fresh database + open writer, with one prepare record already written
    // and flushed, ready for the tests in this fixture.
    _writerCheckpoint = new InMemoryCheckpoint();
    _db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                            new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                            1024,
                                            0,
                                            _writerCheckpoint,
                                            new InMemoryCheckpoint(),
                                            new ICheckpoint[0]));
    _db.OpenVerifyAndClean();

    _writer = new TFChunkWriter(_db);
    _writer.Open();

    _record = new PrepareLogRecord(logPosition: 0xDEAD,
                                   eventId: _eventId,
                                   correlationId: _correlationId,
                                   transactionPosition: 0xDEAD,
                                   transactionOffset: 0xBEEF,
                                   eventStreamId: "WorldEnding",
                                   expectedVersion: 1234,
                                   timeStamp: new DateTime(2012, 12, 21),
                                   flags: PrepareFlags.SingleWrite,
                                   eventType: "type",
                                   data: new byte[] { 1, 2, 3, 4, 5 },
                                   metadata: new byte[] { 7, 17 });

    long newPos;
    _writer.Write(_record, out newPos);
    _writer.Flush();
}
public void old_version_of_chunks_are_removed()
{
    // Two unrelated files that must never be touched by cleanup.
    File.Create(Path.Combine(PathName, "foo")).Close();
    File.Create(Path.Combine(PathName, "bla")).Close();

    var config = new TFChunkDbConfig(PathName,
                                     new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                     100,
                                     0,
                                     new InMemoryCheckpoint(400),
                                     new ICheckpoint[0]);
    var db = new TFChunkDb(config);

    // Several versions per chunk number; only the highest version of each
    // should survive OpenVerifyAndClean.
    CreateChunk(Path.Combine(PathName, "chunk-000000.000000"), config.ChunkSize, config.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000000.000002"), config.ChunkSize, config.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000000.000005"), config.ChunkSize, config.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000001.000000"), config.ChunkSize, config.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000001.000001"), config.ChunkSize, config.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000002.000000"), config.ChunkSize, config.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000003.000007"), config.ChunkSize, config.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000003.000008"), config.ChunkSize, config.ChunkSize);

    Assert.DoesNotThrow(() => db.OpenVerifyAndClean(verifyHash: false));

    Assert.IsTrue(File.Exists(Path.Combine(PathName, "foo")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "bla")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000000.000005")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000001.000001")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000002.000000")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000003.000008")));
    Assert.AreEqual(6, Directory.GetFiles(PathName, "*").Length);

    db.Dispose();
}
public void try_read_returns_false_when_writer_checksum_is_equal_to_reader_checksum()
{
    // Arrange: writer and chaser checkpoints both at 12 — the chaser is caught up.
    var writerchk = new InMemoryCheckpoint();
    var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               10000,
                                               0,
                                               writerchk,
                                               chaserchk,
                                               new[] { writerchk, chaserchk }));
    db.OpenVerifyAndClean();

    writerchk.Write(12);
    writerchk.Flush();
    chaserchk.Write(12);
    chaserchk.Flush();

    var chaser = new TFChunkChaser(db, writerchk, chaserchk);
    chaser.Open();

    // Act + Assert: a caught-up chaser has nothing to read and must not advance.
    LogRecord record;
    Assert.IsFalse(chaser.TryReadNext(out record));
    Assert.AreEqual(12, chaserchk.Read());

    chaser.Close();
    db.Dispose();
}
public override void TestFixtureSetUp()
{
    base.TestFixtureSetUp();

    // Build the database and run the scenario's writes.
    WriterCheckpoint = new InMemoryCheckpoint(0);
    var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
    Db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                           new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                           10000,
                                           0,
                                           WriterCheckpoint,
                                           new[] { chaserchk }));
    Db.OpenVerifyAndClean(); // create db

    Writer = new TFChunkWriter(Db);
    Writer.Open();
    WriteTestScenario();
    Writer.Close();
    Writer = null;

    // Bring the chaser checkpoint up to the writer position.
    WriterCheckpoint.Flush();
    chaserchk.Write(WriterCheckpoint.Read());
    chaserchk.Flush();

    // Build the read index on top of the written data.
    TableIndex = new TableIndex(Path.Combine(PathName, "index"), () => new HashListMemTable(), _maxEntriesInMemTable);
    TableIndex.Initialize();

    var reader = new TFChunkReader(Db, Db.Config.WriterCheckpoint);
    ReadIndex = new ReadIndex(new NoopPublisher(),
                              2,
                              () => new TFChunkSequentialReader(Db, Db.Config.WriterCheckpoint, 0),
                              () => reader,
                              TableIndex,
                              new ByLengthHasher());
    ReadIndex.Build();
}
public void when_checkpoint_is_on_boundary_of_new_chunk_excessive_last_chunks_are_removed_if_present_and_new_empty_one_is_created()
{
    // Checkpoint 200 / chunk size 100: chunk 2 is just starting, so the
    // newer version chunk-000002.000001 is excessive and must be removed.
    var config = new TFChunkDbConfig(PathName,
                                     new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                     100,
                                     0,
                                     new InMemoryCheckpoint(200),
                                     new InMemoryCheckpoint(),
                                     new ICheckpoint[0]);
    var db = new TFChunkDb(config);

    CreateChunk(Path.Combine(PathName, "chunk-000000.000000"), config.ChunkSize, config.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000001.000001"), config.ChunkSize, config.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000002.000000"), config.ChunkSize, config.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000002.000001"), config.ChunkSize, config.ChunkSize);

    Assert.DoesNotThrow(() => db.OpenVerifyAndClean(verifyHash: false));

    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000000.000000")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000001.000001")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000002.000000")));
    Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);

    db.Dispose();
}
public override void TestFixtureSetUp()
{
    base.TestFixtureSetUp();

    _db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                            new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                            4096,
                                            0,
                                            new InMemoryCheckpoint(),
                                            new ICheckpoint[0]));
    _db.OpenVerifyAndClean();

    // Append RecordsCount back-to-back records into the first chunk, each
    // positioned at the previous append's NewPosition.
    var chunk = _db.Manager.GetChunk(0);
    _records = new LogRecord[RecordsCount];
    _results = new RecordWriteResult[RecordsCount];

    for (int i = 0; i < _records.Length; ++i)
    {
        var logPosition = i == 0 ? 0 : _results[i - 1].NewPosition;
        _records[i] = LogRecord.SingleWrite(logPosition,
                                            Guid.NewGuid(),
                                            Guid.NewGuid(),
                                            "es1",
                                            ExpectedVersion.Any,
                                            "et1",
                                            new byte[] { 0, 1, 2 },
                                            new byte[] { 5, 7 });
        _results[i] = chunk.TryAppend(_records[i]);
    }
    chunk.Flush();

    // Advance the writer checkpoint to the end of the last record.
    _db.Config.WriterCheckpoint.Write(_results[RecordsCount - 1].NewPosition);
    _db.Config.WriterCheckpoint.Flush();
}
public void a_record_can_be_written()
{
    // Arrange: hand-craft a chunk file with a valid header so the db opens
    // with the writer checkpoint already at offset 137 inside the chunk.
    var filename = Path.Combine(PathName, "prefix.tf0");
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
    var chunkBytes = chunkHeader.AsByteArray();
    var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
    Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, buf);

    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               chunkHeader.ChunkSize,
                                               0,
                                               _checkpoint,
                                               new ICheckpoint[0]));
    db.OpenVerifyAndClean();

    // Act: write one large prepare record through the chunk writer.
    var writer = new TFChunkWriter(db);
    var record = new PrepareLogRecord(logPosition: 0,
                                      correlationId: _correlationId,
                                      eventId: _eventId,
                                      transactionPosition: 0,
                                      transactionOffset: 0,
                                      eventStreamId: "WorldEnding",
                                      expectedVersion: 1234,
                                      timeStamp: new DateTime(2012, 12, 21),
                                      flags: PrepareFlags.None,
                                      eventType: "type",
                                      data: new byte[8000],
                                      metadata: new byte[] { 7, 17 });
    long pos;
    Assert.IsTrue(writer.Write(record, out pos));
    writer.Close();
    db.Dispose();

    // Assert: the checkpoint advanced by exactly the framed record size.
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());

    // Assert: the record can be read back from the raw file, skipping the
    // chunk header, the 137-byte starting offset and the 4-byte length prefix.
    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read))
    {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record, read);
    }
}
public void with_a_writer_checksum_of_zero_and_no_files_is_valid()
{
    // An empty directory plus a zero writer checkpoint is a valid brand-new db.
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               10000,
                                               0,
                                               new InMemoryCheckpoint(0),
                                               new ICheckpoint[0]));
    Assert.DoesNotThrow(() => db.OpenVerifyAndClean());
    db.Dispose();
}
public void a_record_can_be_written()
{
    // Arrange: pre-create a chunk file containing only a valid header,
    // then open the db with the writer checkpoint at offset 137.
    var filename = Path.Combine(PathName, "prefix.tf0");
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
    var chunkBytes = chunkHeader.AsByteArray();
    var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
    Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, buf);

    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               chunkHeader.ChunkSize,
                                               0,
                                               _checkpoint,
                                               new InMemoryCheckpoint(),
                                               new ICheckpoint[0]));
    db.OpenVerifyAndClean();

    var bytes = new byte[3994]; // this gives exactly 4097 size of record, with 3993 (rec size 4096) everything works fine!
    new Random().NextBytes(bytes);

    var writer = new TFChunkWriter(db);
    var record = new PrepareLogRecord(logPosition: 123,
                                      correlationId: _correlationId,
                                      eventId: _eventId,
                                      transactionPosition: 789,
                                      transactionOffset: 543,
                                      eventStreamId: "WorldEnding",
                                      expectedVersion: 1234,
                                      timeStamp: new DateTime(2012, 12, 21),
                                      flags: PrepareFlags.SingleWrite,
                                      eventType: "type",
                                      data: bytes,
                                      metadata: new byte[] { 0x07, 0x17 });

    // Act: write the record and shut everything down.
    long pos;
    Assert.IsTrue(writer.Write(record, out pos));
    writer.Close();
    db.Dispose();

    // Assert: checkpoint moved by the framed record size.
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());

    // Assert: the record round-trips through the raw chunk file.
    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read))
    {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record, read);
    }
}
public void try_read_returns_record_when_writerchecksum_ahead()
{
    // Arrange: write one record directly into a hand-built chunk file.
    var recordToWrite = new PrepareLogRecord(logPosition: 0,
                                             correlationId: _correlationId,
                                             eventId: _eventId,
                                             transactionPosition: 0,
                                             transactionOffset: 0,
                                             eventStreamId: "WorldEnding",
                                             expectedVersion: 1234,
                                             timeStamp: new DateTime(2012, 12, 21),
                                             flags: PrepareFlags.None,
                                             eventType: "type",
                                             data: new byte[] { 1, 2, 3, 4, 5 },
                                             metadata: new byte[] { 7, 17 });
    using (var fs = new FileStream(Path.Combine(PathName, "prefix.tf0"), FileMode.CreateNew, FileAccess.Write))
    {
        fs.SetLength(ChunkHeader.Size + ChunkFooter.Size + 10000);
        var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0).AsByteArray();
        var writer = new BinaryWriter(fs);
        writer.Write(chunkHeader);
        recordToWrite.WriteWithLengthPrefixAndSuffixTo(writer);
        fs.Close();
    }

    // Writer checkpoint (128) is ahead of the chaser (0), so the chaser has data to read.
    var writerchk = new InMemoryCheckpoint(128);
    var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               10000,
                                               0,
                                               writerchk,
                                               chaserchk,
                                               new[] { chaserchk }));
    db.OpenVerifyAndClean();

    // Act: chase one record.
    var chaser = new TFChunkChaser(db, writerchk, chaserchk);
    chaser.Open();
    LogRecord record;
    var recordRead = chaser.TryReadNext(out record);
    chaser.Close();

    // Assert: the record was read and the chaser checkpoint advanced past it.
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
    Assert.IsTrue(recordRead);
    Assert.AreEqual(recordToWrite, record);

    db.Close();
}
public void allows_no_files_when_checkpoint_is_zero()
{
    // With a zero checkpoint an empty directory must open cleanly.
    var config = new TFChunkDbConfig(PathName,
                                     new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                     10000,
                                     0,
                                     new InMemoryCheckpoint(),
                                     new ICheckpoint[0]);
    var db = new TFChunkDb(config);
    Assert.DoesNotThrow(() => db.OpenVerifyAndClean(verifyHash: false));
    db.Dispose();
}
public void with_a_writer_checksum_of_nonzero_and_no_files_a_corrupted_database_exception_is_thrown()
{
    // Checkpoint says 500 bytes were written, but there is no chunk file at all:
    // opening must fail with ChunkNotFound wrapped in CorruptDatabaseException.
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               10000,
                                               0,
                                               new InMemoryCheckpoint(500),
                                               new ICheckpoint[0]));
    var exc = Assert.Throws<CorruptDatabaseException>(() => db.OpenVerifyAndClean());
    Assert.IsInstanceOf<ChunkNotFoundException>(exc.InnerException);
    db.Dispose();
}
public void try_read_returns_record_when_writerchecksum_equal()
{
    var writerchk = new InMemoryCheckpoint(0);
    var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               10000,
                                               0,
                                               writerchk,
                                               chaserchk,
                                               new[] { chaserchk }));
    db.OpenVerifyAndClean();

    // Write one record and advance the writer checkpoint to exactly its end,
    // so writer position == position right after the record.
    var recordToWrite = new PrepareLogRecord(logPosition: 0,
                                             correlationId: _correlationId,
                                             eventId: _eventId,
                                             transactionPosition: 0,
                                             transactionOffset: 0,
                                             eventStreamId: "WorldEnding",
                                             expectedVersion: 1234,
                                             timeStamp: new DateTime(2012, 12, 21),
                                             flags: PrepareFlags.None,
                                             eventType: "type",
                                             data: new byte[] { 1, 2, 3, 4, 5 },
                                             metadata: new byte[] { 7, 17 });
    var writer = new TFChunkWriter(db);
    writer.Open();
    long pos;
    Assert.IsTrue(writer.Write(recordToWrite, out pos));
    writer.Close();
    writerchk.Write(recordToWrite.GetSizeWithLengthPrefixAndSuffix());

    // Act: the chaser should still read the record it has not consumed yet.
    var chaser = new TFChunkChaser(db, writerchk, chaserchk);
    chaser.Open();
    LogRecord record;
    var readRecord = chaser.TryReadNext(out record);
    chaser.Close();

    Assert.IsTrue(readRecord);
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
    Assert.AreEqual(recordToWrite, record);

    db.Close();
}
public void a_record_can_be_written()
{
    // Arrange: pre-create a chunk file containing only a valid header; the
    // writer checkpoint starts at 137, i.e. somewhere inside the chunk.
    var filename = Path.Combine(PathName, "prefix.tf0");
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
    var chunkBytes = chunkHeader.AsByteArray();
    var bytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
    Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, bytes);

    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               10000,
                                               0,
                                               _checkpoint,
                                               new InMemoryCheckpoint(),
                                               new ICheckpoint[0]));
    db.OpenVerifyAndClean();

    // Act: write a single prepare record (Close takes care of flushing).
    var tf = new TFChunkWriter(db);
    var record = new PrepareLogRecord(logPosition: 0,
                                      correlationId: _correlationId,
                                      eventId: _eventId,
                                      expectedVersion: 1234,
                                      transactionPosition: 0,
                                      transactionOffset: 0,
                                      eventStreamId: "WorldEnding",
                                      timeStamp: new DateTime(2012, 12, 21),
                                      flags: PrepareFlags.None,
                                      eventType: "type",
                                      data: new byte[] { 1, 2, 3, 4, 5 },
                                      metadata: new byte[] { 7, 17 });
    long tmp;
    tf.Write(record, out tmp);
    tf.Close();
    db.Dispose();

    // Assert: checkpoint advanced by the framed record size from its
    // initial offset of 137.
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());

    // Assert: the record round-trips — skip the chunk header, the 137-byte
    // starting offset and the 4-byte length prefix, then read it back.
    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read))
    {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record, read);
    }
}
public void allows_first_correct_file_when_checkpoint_is_zero()
{
    // An already-present, correctly-sized chunk 0 is allowed even when the
    // writer checkpoint is still zero.
    var config = new TFChunkDbConfig(PathName,
                                     new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                     10000,
                                     0,
                                     new InMemoryCheckpoint(),
                                     new ICheckpoint[0]);
    var db = new TFChunkDb(config);
    CreateChunk(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(0)), config.ChunkSize, config.ChunkSize);
    Assert.DoesNotThrow(() => db.OpenVerifyAndClean(verifyHash: false));
    db.Dispose();
}
public void allows_with_exactly_enough_file_to_reach_checksum()
{
    // One full chunk of size 10000 is exactly enough to cover checkpoint 10000.
    var config = new TFChunkDbConfig(PathName,
                                     new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                     10000,
                                     0,
                                     new InMemoryCheckpoint(10000),
                                     new ICheckpoint[0]);
    var db = new TFChunkDb(config);
    CreateChunk(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(0)), config.ChunkSize, config.ChunkSize);
    Assert.DoesNotThrow(() => db.OpenVerifyAndClean(verifyHash: false));
    db.Dispose();
}
public override void TestFixtureSetUp()
{
    base.TestFixtureSetUp();

    // Build the database and run the scenario's writes.
    WriterChecksum = new InMemoryCheckpoint(0);
    ChaserChecksum = new InMemoryCheckpoint(0);
    Db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                           new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                           10000,
                                           0,
                                           WriterChecksum,
                                           ChaserChecksum,
                                           new[] { WriterChecksum, ChaserChecksum }));
    Db.OpenVerifyAndClean(); // create db

    Writer = new TFChunkWriter(Db);
    Writer.Open();
    WriteTestScenario();
    Writer.Close();
    Writer = null;

    // Bring the chaser checkpoint up to the writer position.
    WriterChecksum.Flush();
    ChaserChecksum.Write(WriterChecksum.Read());
    ChaserChecksum.Flush();

    // Build the read index on top of the written data.
    TableIndex = new TableIndex(Path.Combine(PathName, "index"),
                                () => new HashListMemTable(MaxEntriesInMemTable * 2),
                                MaxEntriesInMemTable);
    var reader = new TFChunkReader(Db, Db.Config.WriterCheckpoint);
    ReadIndex = new ReadIndex(new NoopPublisher(),
                              2,
                              () => new TFChunkSequentialReader(Db, Db.Config.WriterCheckpoint, 0),
                              () => reader,
                              TableIndex,
                              new ByLengthHasher(),
                              new NoLRUCache<string, StreamCacheInfo>());
    ReadIndex.Build();

    // scavenge must run after readIndex is built
    if (_scavenge)
    {
        _scavenger = new TFChunkScavenger(Db, ReadIndex);
        _scavenger.Scavenge(alwaysKeepScavenged: true);
    }
}
public void allows_next_new_chunk_when_checksum_is_exactly_in_between_two_chunks()
{
    // Checkpoint 10000 is the boundary between chunk 0 and chunk 1; a fresh
    // chunk 1 already on disk must be accepted.
    var config = new TFChunkDbConfig(PathName,
                                     new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                     10000,
                                     0,
                                     new InMemoryCheckpoint(10000),
                                     new ICheckpoint[0]);
    var db = new TFChunkDb(config);
    CreateChunk(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(0)), config.ChunkSize, config.ChunkSize);
    CreateChunk(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(1)), config.ChunkSize, config.ChunkSize);
    Assert.DoesNotThrow(() => db.OpenVerifyAndClean(verifyHash: false));
    db.Dispose();
}
public override void TestFixtureSetUp()
{
    base.TestFixtureSetUp();

    _db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                            new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                            4096,
                                            0,
                                            new InMemoryCheckpoint(),
                                            new InMemoryCheckpoint(),
                                            new ICheckpoint[0]));
    _db.OpenVerifyAndClean();

    // Write RecordsCount records, starting a new chunk after every 3 records.
    // Every odd record goes to the stream that will be scavenged away.
    var chunk = _db.Manager.GetChunk(0);
    _records = new LogRecord[RecordsCount];
    _results = new RecordWriteResult[RecordsCount];
    var pos = 0;
    for (int i = 0; i < RecordsCount; ++i)
    {
        if (i > 0 && i % 3 == 0)
        {
            // New chunk: log position restarts at the chunk boundary.
            pos = i / 3 * _db.Config.ChunkSize;
            chunk.Complete();
            chunk = _db.Manager.AddNewChunk();
        }
        _records[i] = LogRecord.SingleWrite(pos,
                                            Guid.NewGuid(),
                                            Guid.NewGuid(),
                                            i % 2 == 1 ? "es-to-scavenge" : "es1",
                                            ExpectedVersion.Any,
                                            "et1",
                                            new byte[1200],
                                            new byte[] { 5, 7 });
        _results[i] = chunk.TryAppend(_records[i]);
        pos += _records[i].GetSizeWithLengthPrefixAndSuffix();
    }

    // Even-indexed records belong to "es1" and must survive the scavenge.
    _keptRecords = _records.Where((x, i) => i % 2 == 0).ToArray();

    chunk.Flush();
    chunk.Complete();
    _db.Config.WriterCheckpoint.Write((RecordsCount / 3) * _db.Config.ChunkSize + _results[RecordsCount - 1].NewPosition);
    _db.Config.WriterCheckpoint.Flush();

    // Scavenge with a fake read index that marks "es-to-scavenge" as deleted.
    var scavenger = new TFChunkScavenger(_db, new FakeReadIndex(x => x == "es-to-scavenge"));
    scavenger.Scavenge(alwaysKeepScavenged: true);
}
public void when_in_brand_new_extraneous_files_throws_corrupt_database_exception()
{
    // A chunk numbered far beyond the checkpoint (4 vs. 0) is extraneous and
    // must make opening fail.
    var config = new TFChunkDbConfig(PathName,
                                     new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                     10000,
                                     0,
                                     new InMemoryCheckpoint(0),
                                     new ICheckpoint[0]);
    var db = new TFChunkDb(config);
    CreateChunk(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(4)), config.ChunkSize, config.ChunkSize);

    var ex = Assert.Throws<CorruptDatabaseException>(() => db.OpenVerifyAndClean(verifyHash: false));
    Assert.IsInstanceOf<ExtraneousFileFoundException>(ex.InnerException);

    db.Dispose();
}
public void when_a_reader_checksum_is_ahead_of_writer_checksum_throws_corrupt_database_exception()
{
    // Reader checkpoint (11) ahead of writer checkpoint (0) is impossible in
    // a healthy db and must be reported as corruption.
    var config = new TFChunkDbConfig(PathName,
                                     new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                     10000,
                                     0,
                                     new InMemoryCheckpoint(0),
                                     new ICheckpoint[] { new InMemoryCheckpoint(11) });
    var db = new TFChunkDb(config);
    CreateChunk(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(0)), config.ChunkSize, config.ChunkSize);

    var ex = Assert.Throws<CorruptDatabaseException>(() => db.OpenVerifyAndClean(verifyHash: false));
    Assert.IsInstanceOf<ReaderCheckpointHigherThanWriterException>(ex.InnerException);

    db.Dispose();
}
public void with_file_of_wrong_size_database_corruption_is_detected()
{
    // Chunk 0 exists but is plain text of the wrong size — a bad chunk.
    var config = new TFChunkDbConfig(PathName,
                                     new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                     10000,
                                     0,
                                     new InMemoryCheckpoint(500),
                                     new ICheckpoint[0]);
    var db = new TFChunkDb(config);
    File.WriteAllText(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(0)), "this is just some test blahbydy blah");

    var ex = Assert.Throws<CorruptDatabaseException>(() => db.OpenVerifyAndClean(verifyHash: false));
    Assert.IsInstanceOf<BadChunkInDatabaseException>(ex.InnerException);

    db.Dispose();
}
public void with_wrong_actual_chunk_size_in_chunk_footer()
{
    // Footer claims 12000 bytes of data in a 10000-byte chunk — a bad chunk.
    var config = new TFChunkDbConfig(PathName,
                                     new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                     10000,
                                     0,
                                     new InMemoryCheckpoint(10000),
                                     new ICheckpoint[0]);
    var db = new TFChunkDb(config);
    CreateChunk(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(0)), 10000, 12000);

    var ex = Assert.Throws<CorruptDatabaseException>(() => db.OpenVerifyAndClean(verifyHash: false));
    Assert.IsInstanceOf<BadChunkInDatabaseException>(ex.InnerException);

    db.Dispose();
}
public void with_not_enough_files_to_reach_checksum_throws()
{
    // Checkpoint 15000 needs chunk 1 as well, but only chunk 0 exists.
    var config = new TFChunkDbConfig(PathName,
                                     new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                     10000,
                                     0,
                                     new InMemoryCheckpoint(15000),
                                     new ICheckpoint[0]);
    var db = new TFChunkDb(config);
    CreateChunk(Path.Combine(PathName, config.FileNamingStrategy.GetFilenameFor(0)), config.ChunkSize, config.ChunkSize);

    var exc = Assert.Throws<CorruptDatabaseException>(() => db.OpenVerifyAndClean(verifyHash: false));
    Assert.IsInstanceOf<ChunkNotFoundException>(exc.InnerException);

    db.Dispose();
}
public void a_record_can_be_written()
{
    // Arrange: brand-new db starting at checkpoint 0.
    _checkpoint = new InMemoryCheckpoint(0);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               1000,
                                               0,
                                               _checkpoint,
                                               new InMemoryCheckpoint(),
                                               new ICheckpoint[0]));
    db.OpenVerifyAndClean();

    var tf = new TFChunkWriter(db);
    tf.Open();

    var record = new PrepareLogRecord(logPosition: 0,
                                      correlationId: _correlationId,
                                      eventId: _eventId,
                                      transactionPosition: 0,
                                      transactionOffset: 0,
                                      eventStreamId: "WorldEnding",
                                      expectedVersion: 1234,
                                      timeStamp: new DateTime(2012, 12, 21),
                                      flags: PrepareFlags.None,
                                      eventType: "type",
                                      data: new byte[] { 1, 2, 3, 4, 5 },
                                      metadata: new byte[] { 7, 17 });

    // Act: write and shut down.
    long tmp;
    tf.Write(record, out tmp);
    tf.Close();
    db.Dispose();

    // Assert: checkpoint equals the framed record size.
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), _checkpoint.Read());

    // Assert: the record round-trips from the raw chunk file (skip the
    // chunk header, consume the 4-byte length prefix, then read).
    using (var filestream = File.Open(Path.Combine(PathName, "prefix.tf0"), FileMode.Open, FileAccess.Read))
    {
        filestream.Position = ChunkHeader.Size;
        var reader = new BinaryReader(filestream);
        reader.ReadInt32();
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record, read);
    }
}
public void with_a_writer_checksum_of_zero_the_first_chunk_is_created_with_correct_name()
{
    var config = new TFChunkDbConfig(PathName,
                                     new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                     10000,
                                     0,
                                     new InMemoryCheckpoint(0),
                                     new ICheckpoint[0]);
    var db = new TFChunkDb(config);
    db.OpenVerifyAndClean();
    db.Dispose();

    // Exactly one file should exist: chunk 0, named by the prefix strategy,
    // sized data + header + footer.
    Assert.AreEqual(1, Directory.GetFiles(PathName).Length);
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "prefix.tf0")));
    var fileInfo = new FileInfo(Path.Combine(PathName, "prefix.tf0"));
    Assert.AreEqual(10000 + ChunkHeader.Size + ChunkFooter.Size, fileInfo.Length);
}
public void try_read_returns_false_when_writer_checksum_is_zero()
{
    // With the writer checkpoint at 0, a sequential reader has nothing to read.
    var writerchk = new InMemoryCheckpoint(0);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                               10000,
                                               0,
                                               writerchk,
                                               new InMemoryCheckpoint()));
    db.OpenVerifyAndClean();

    var reader = new TFChunkSequentialReader(db, writerchk, 0);
    LogRecord record;
    Assert.IsFalse(reader.TryReadNext(out record));

    db.Close();
}