public void when_checkpoint_is_exactly_on_the_boundary_of_chunk_the_last_chunk_could_be_present() {
    // Writer checkpoint sits at 200 — exactly the end of chunk #1. An ongoing
    // chunk #2 is allowed to already exist and must survive the open.
    var cfg = new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        100, 0, new InMemoryCheckpoint(200), new InMemoryCheckpoint(), new ICheckpoint[0]);
    var chunkDb = new TFChunkDb(cfg);
    CreateChunk(Path.Combine(PathName, "chunk-000000.000000"), cfg.ChunkSize, cfg.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000001.000001"), cfg.ChunkSize, cfg.ChunkSize);
    CreateOngoingChunk(Path.Combine(PathName, "chunk-000002.000000"), cfg.ChunkSize, cfg.ChunkSize);

    Assert.DoesNotThrow(() => chunkDb.OpenVerifyAndClean(verifyHash: false));

    // The ongoing chunk is kept and registered; nothing else was added or removed.
    Assert.IsNotNull(chunkDb.Manager.GetChunk(2));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000000.000000")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000001.000001")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000002.000000")));
    Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);

    chunkDb.Dispose();
}
public void old_version_of_chunks_are_removed() {
    // Two unrelated files that must not be touched by the cleanup.
    File.Create(Path.Combine(PathName, "foo")).Close();
    File.Create(Path.Combine(PathName, "bla")).Close();

    var cfg = new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        100, 0, new InMemoryCheckpoint(400), new ICheckpoint[0]);
    var chunkDb = new TFChunkDb(cfg);

    // Several versions per chunk number; only the highest version of each should survive.
    var chunkFiles = new[] {
        "chunk-000000.000000", "chunk-000000.000002", "chunk-000000.000005",
        "chunk-000001.000000", "chunk-000001.000001",
        "chunk-000002.000000",
        "chunk-000003.000007", "chunk-000003.000008"
    };
    foreach (var chunkFile in chunkFiles)
        CreateChunk(Path.Combine(PathName, chunkFile), cfg.ChunkSize, cfg.ChunkSize);

    Assert.DoesNotThrow(chunkDb.OpenVerifyAndClean);

    Assert.IsTrue(File.Exists(Path.Combine(PathName, "foo")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "bla")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000000.000005")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000001.000001")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000002.000000")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000003.000008")));
    Assert.AreEqual(6, Directory.GetFiles(PathName, "*").Length);

    chunkDb.Dispose();
}
public void try_read_returns_false_when_writer_checkpoint_is_zero() {
    // Nothing has been written (writer checkpoint == 0), so the chaser must
    // report that there is no next record.
    var writerCheckpoint = new InMemoryCheckpoint(0);
    var chunkDb = new TFChunkDb(new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000, 0, writerCheckpoint, new InMemoryCheckpoint(),
        new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1)));
    chunkDb.Open();

    var chaser = new TFChunkChaser(chunkDb, writerCheckpoint, new InMemoryCheckpoint());
    chaser.Open();
    Assert.IsFalse(chaser.TryReadNext(out LogRecord record));
    chaser.Close();

    chunkDb.Dispose();
}
public void with_a_writer_checksum_of_zero_and_no_files_is_valid() {
    // A fresh database: empty directory plus a zero writer checksum opens cleanly.
    var database = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0));
    Assert.DoesNotThrow(() => database.Open());
    database.Dispose();
}
public void temporary_files_are_removed() {
    var cfg = new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        100, 0, new InMemoryCheckpoint(150), new ICheckpoint[0]);
    var chunkDb = new TFChunkDb(cfg);
    CreateChunk(Path.Combine(PathName, "chunk-000000.000000"), cfg.ChunkSize, cfg.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000001.000001"), cfg.ChunkSize, cfg.ChunkSize);

    // One ordinary file plus two *.tmp leftovers; only the *.tmp files should go.
    foreach (var extra in new[] { "bla", "bla.scavenge.tmp", "bla.tmp" })
        File.Create(Path.Combine(PathName, extra)).Close();

    Assert.DoesNotThrow(chunkDb.OpenVerifyAndClean);

    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000000.000000")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000001.000001")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "bla")));
    Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);

    chunkDb.Dispose();
}
public void Dispose() {
    //epochManager?.Dispose();
    //reader?.Dispose();
    // Guard the writer dispose so a teardown failure cannot skip disposing the
    // db — same workaround the other Dispose in this file already uses.
    try {
        _writer?.Dispose();
    } catch {
        //workaround for TearDown error
    }
    _db?.Dispose();
}
public override Task TestFixtureTearDown() {
    // Release the log format and the database, then delegate to the base teardown.
    _logFormat?.Dispose();
    _db.Dispose();
    return base.TestFixtureTearDown();
}
public void try_read_returns_false_when_writer_checksum_is_equal_to_reader_checksum() {
    var writerCheckpoint = new InMemoryCheckpoint();
    var chaserCheckpoint = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
    var chunkDb = new TFChunkDb(new TFChunkDbConfig(PathName,
        new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        10000, 0, writerCheckpoint, chaserCheckpoint,
        new[] { writerCheckpoint, chaserCheckpoint }));
    chunkDb.OpenVerifyAndClean();

    // Advance both checkpoints to the same position: the chaser is fully caught up.
    writerCheckpoint.Write(12);
    writerCheckpoint.Flush();
    chaserCheckpoint.Write(12);
    chaserCheckpoint.Flush();

    var chaser = new TFChunkChaser(chunkDb, writerCheckpoint, chaserCheckpoint);
    chaser.Open();
    Assert.IsFalse(chaser.TryReadNext(out LogRecord record));
    // The chaser checkpoint must not have moved.
    Assert.AreEqual(12, chaserCheckpoint.Read());
    chaser.Close();

    chunkDb.Dispose();
}
public void when_checkpoint_is_on_boundary_of_new_chunk_excessive_last_chunks_are_removed_if_present_and_new_empty_one_is_created() {
    // Checkpoint at 200 = boundary of chunk #2. Two completed versions of
    // chunk #2 exist past the checkpoint; only one chunk #2 file may remain.
    var cfg = new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        100, 0, new InMemoryCheckpoint(200), new InMemoryCheckpoint(), new ICheckpoint[0]);
    var chunkDb = new TFChunkDb(cfg);
    CreateChunk(Path.Combine(PathName, "chunk-000000.000000"), cfg.ChunkSize, cfg.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000001.000001"), cfg.ChunkSize, cfg.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000002.000000"), cfg.ChunkSize, cfg.ChunkSize);
    CreateChunk(Path.Combine(PathName, "chunk-000002.000001"), cfg.ChunkSize, cfg.ChunkSize);

    Assert.DoesNotThrow(() => chunkDb.OpenVerifyAndClean(verifyHash: false));

    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000000.000000")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000001.000001")));
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "chunk-000002.000000")));
    Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);

    chunkDb.Dispose();
}
public void with_a_writer_checksum_of_nonzero_and_no_files_a_corrupted_database_exception_is_thrown() {
    // A checksum of 500 promises data on disk; with no chunk files the open must fail.
    var database = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 500));
    var exception = Assert.Throws<CorruptDatabaseException>(() => database.Open());
    Assert.IsInstanceOf<ChunkNotFoundException>(exception.InnerException);
    database.Dispose();
}
public virtual async Task DisposeAsync() {
    // Teardown order matters: stop the node first so nothing is still writing,
    // then dispose the database, and finally stop/dispose the host and client.
    await _node.StopAsync();
    _db.Dispose();
    await _host.StopAsync();
    _host.Dispose();
    Client.Dispose();
}
public void a_record_can_be_written() {
    // Lay down a single complete chunk file on disk, open the db with the
    // writer checkpoint 137 bytes into that chunk, write one prepare record
    // there, and verify it round-trips by reading it straight from the file.
    // (Removed leftover Console.WriteLine debug output.)
    var filename = Path.Combine(PathName, "prefix.tf0");
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
    var chunkBytes = chunkHeader.AsByteArray();
    var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
    Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, buf);

    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
        new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        chunkHeader.ChunkSize, 0, _checkpoint, new ICheckpoint[0]));
    db.OpenVerifyAndClean();

    var writer = new TFChunkWriter(db);
    var record = new PrepareLogRecord(logPosition: 0,
        correlationId: _correlationId,
        eventId: _eventId,
        transactionPosition: 0,
        transactionOffset: 0,
        eventStreamId: "WorldEnding",
        expectedVersion: 1234,
        timeStamp: new DateTime(2012, 12, 21),
        flags: PrepareFlags.None,
        eventType: "type",
        data: new byte[8000],
        metadata: new byte[] { 7, 17 });
    long pos;
    Assert.IsTrue(writer.Write(record, out pos));
    writer.Close();
    db.Dispose();

    // The checkpoint must have advanced by exactly the framed record size.
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());

    // Re-read from the file: skip the chunk header, the 137 pre-existing bytes
    // and the record's 4-byte length prefix, then parse the record itself.
    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record, read);
    }
}
public void a_record_can_be_written() {
    // Lay down a complete chunk file, open the db with the writer checkpoint
    // 137 bytes into it, write one large prepare record and verify it
    // round-trips by reading it back directly from the chunk file.
    var filename = GetFilePathFor("chunk-000000.000000");
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid());
    var chunkBytes = chunkHeader.AsByteArray();
    var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
    Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, buf);
    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint(), chunkSize: chunkHeader.ChunkSize));
    db.Open();
    // Payload size chosen to hit a record size of exactly 4097 bytes — the
    // interesting boundary case noted below.
    var bytes = new byte[3994]; // this gives exactly 4097 size of record, with 3993 (rec size 4096) everything works fine!
    new Random().NextBytes(bytes);
    var writer = new TFChunkWriter(db);
    var logFormat = LogFormatHelper <TLogFormat, TStreamId> .LogFormat;
    logFormat.StreamNameIndex.GetOrAddId("WorldEnding", out var streamId, out _, out _);
    var record = LogRecord.Prepare(
        factory: logFormat.RecordFactory, logPosition: 137, correlationId: _correlationId, eventId: _eventId,
        transactionPos: 789, transactionOffset: 543, eventStreamId: streamId, expectedVersion: 1234,
        timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.SingleWrite, eventType: "type",
        data: bytes, metadata: new byte[] { 0x07, 0x17 });
    long pos;
    Assert.IsTrue(writer.Write(record, out pos));
    writer.Close();
    db.Dispose();
    // The checkpoint must have advanced by exactly the framed record size.
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());
    // Re-read from disk: skip the chunk header, the 137 pre-existing bytes and
    // the record's 4-byte length prefix, then parse and compare.
    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader, (int)reader.BaseStream.Length);
        Assert.AreEqual(record, read);
    }
}
public void Dispose() {
    //epochManager?.Dispose();
    //reader?.Dispose();
    // Swallow writer-dispose failures so the db below is always disposed.
    try {
        _writer?.Dispose();
    } catch {
        //workaround for TearDown error
    }
    _db?.Dispose();
}
public void Shutdown() {
    // Fixture teardown: stop the node, release checkpoints and the chunk db,
    // then attempt to remove the fixture's one-time database directory.
    _node.Stop();
    // NOTE(review): fixed 2s wait presumably gives the node time to finish
    // shutting down before files are disposed/deleted — confirm; an explicit
    // stopped-signal would be more reliable than a sleep.
    Thread.Sleep(2000);
    _chaserChk.Dispose();
    _writerChk.Dispose();
    _tfChunkDb.Dispose();
    TryDeleteDirectory(_oneTimeDbPath);
}
public void a_record_can_be_written() {
    // Lay down a complete chunk file, open the db with the writer checkpoint
    // 137 bytes into it, write one large prepare record and verify it
    // round-trips by reading it back directly from the chunk file.
    var filename = Path.Combine(PathName, "prefix.tf0");
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
    var chunkBytes = chunkHeader.AsByteArray();
    var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
    Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, buf);
    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName, new PrefixFileNamingStrategy(PathName, "prefix.tf"), chunkHeader.ChunkSize, 0, _checkpoint, new InMemoryCheckpoint(), new ICheckpoint[0]));
    db.OpenVerifyAndClean();
    // Payload size chosen to hit a record size of exactly 4097 bytes — the
    // interesting boundary case noted below.
    var bytes = new byte[3994]; // this gives exactly 4097 size of record, with 3993 (rec size 4096) everything works fine!
    new Random().NextBytes(bytes);
    var writer = new TFChunkWriter(db);
    var record = new PrepareLogRecord(logPosition: 123, correlationId: _correlationId, eventId: _eventId,
        transactionPosition: 789, transactionOffset: 543, eventStreamId: "WorldEnding", expectedVersion: 1234,
        timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.SingleWrite, eventType: "type",
        data: bytes, metadata: new byte[] { 0x07, 0x17 });
    long pos;
    Assert.IsTrue(writer.Write(record, out pos));
    writer.Close();
    db.Dispose();
    // The checkpoint must have advanced by exactly the framed record size.
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());
    // Re-read from disk: skip the chunk header, the 137 pre-existing bytes and
    // the record's 4-byte length prefix, then parse and compare.
    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record, read);
    }
}
public override void TestFixtureTearDown() {
    // Close and release each fixture resource, then run the base teardown.
    ReadIndex.Close();
    ReadIndex.Dispose();
    TableIndex.Close();
    Db.Close();
    Db.Dispose();
    base.TestFixtureTearDown();
}
public override Task TestFixtureTearDown() {
    // Close and release each fixture resource, then delegate to the base teardown.
    ReadIndex.Close();
    ReadIndex.Dispose();
    _tableIndex.Close();
    _db.Close();
    _db.Dispose();
    return base.TestFixtureTearDown();
}
public void with_a_writer_checksum_of_zero_and_no_files_is_valid() {
    // An empty directory plus a zero writer checksum is a legal, fresh database.
    var database = new TFChunkDb(new TFChunkDbConfig(PathName,
        new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        10000, 0, new InMemoryCheckpoint(0), new ICheckpoint[0]));
    Assert.DoesNotThrow(() => database.OpenVerifyAndClean());
    database.Dispose();
}
public void allows_no_files_when_checkpoint_is_zero() {
    // Zero checkpoint means no data was ever written, so an empty dir is fine.
    var cfg = new TFChunkDbConfig(PathName,
        new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        10000, 0, new InMemoryCheckpoint(), new ICheckpoint[0]);
    var chunkDb = new TFChunkDb(cfg);
    Assert.DoesNotThrow(chunkDb.OpenVerifyAndClean);
    chunkDb.Dispose();
}
public override Task TestFixtureTearDown() {
    // Null-safe teardown: any of these may not have been created by the fixture.
    _logFormat?.Dispose();
    ReadIndex?.Close();
    ReadIndex?.Dispose();
    TableIndex?.Close();
    Db?.Close();
    Db?.Dispose();
    return base.TestFixtureTearDown();
}
public void with_a_writer_checksum_of_nonzero_and_no_files_a_corrupted_database_exception_is_thrown() {
    // A checksum of 500 promises data on disk; with no chunk files the open must fail.
    var database = new TFChunkDb(new TFChunkDbConfig(PathName,
        new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        10000, 0, new InMemoryCheckpoint(500), new ICheckpoint[0]));
    var exception = Assert.Throws<CorruptDatabaseException>(() => database.OpenVerifyAndClean());
    Assert.IsInstanceOf<ChunkNotFoundException>(exception.InnerException);
    database.Dispose();
}
public void with_a_writer_checksum_of_zero_and_no_files_is_valid() {
    // An empty directory plus a zero writer checksum is a legal, fresh database.
    var database = new TFChunkDb(new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000, 0, new InMemoryCheckpoint(0), new InMemoryCheckpoint(),
        new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1)));
    Assert.DoesNotThrow(() => database.Open());
    database.Dispose();
}
public void a_record_can_be_written() {
    // Lay down a complete chunk file, open the db with the writer checkpoint
    // 137 bytes into it, write one small prepare record and verify it
    // round-trips by reading it back directly from the chunk file.
    // (Removed stale commented-out code and a TODO already satisfied by the
    // read-back block below.)
    var filename = Path.Combine(PathName, "prefix.tf0");
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
    var chunkBytes = chunkHeader.AsByteArray();
    var bytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
    Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, bytes);

    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
        new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        10000, 0, _checkpoint, new InMemoryCheckpoint(), new ICheckpoint[0]));
    db.OpenVerifyAndClean();

    var tf = new TFChunkWriter(db);
    var record = new PrepareLogRecord(logPosition: 0,
        correlationId: _correlationId,
        eventId: _eventId,
        expectedVersion: 1234,
        transactionPosition: 0,
        transactionOffset: 0,
        eventStreamId: "WorldEnding",
        timeStamp: new DateTime(2012, 12, 21),
        flags: PrepareFlags.None,
        eventType: "type",
        data: new byte[] { 1, 2, 3, 4, 5 },
        metadata: new byte[] { 7, 17 });
    long tmp;
    tf.Write(record, out tmp);
    tf.Close();
    db.Dispose();

    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137,
        _checkpoint.Read()); //137 is fluff assigned to beginning of checkpoint

    // Re-read from disk: skip the chunk header, the 137 pre-existing bytes and
    // the record's 4-byte length prefix, then parse and compare.
    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record, read);
    }
}
public void with_a_writer_checksum_of_zero_the_first_chunk_is_created_with_correct_name_and_is_aligned() {
    // Opening a fresh db must create exactly one chunk file with the
    // zero-numbered name and an aligned on-disk size.
    var chunkDb = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0));
    chunkDb.Open();
    chunkDb.Dispose();

    Assert.AreEqual(1, Directory.GetFiles(PathName).Length);
    var chunkPath = GetFilePathFor("chunk-000000.000000");
    Assert.IsTrue(File.Exists(chunkPath));
    Assert.AreEqual(12288, new FileInfo(chunkPath).Length);
}
public void allows_first_correct_file_when_checkpoint_is_zero() {
    // A pre-existing, correctly named chunk #0 is tolerated at checkpoint 0.
    var cfg = new TFChunkDbConfig(PathName,
        new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        10000, 0, new InMemoryCheckpoint(), new ICheckpoint[0]);
    var chunkDb = new TFChunkDb(cfg);
    CreateChunk(Path.Combine(PathName, cfg.FileNamingStrategy.GetFilenameFor(0)), cfg.ChunkSize, cfg.ChunkSize);
    Assert.DoesNotThrow(chunkDb.OpenVerifyAndClean);
    chunkDb.Dispose();
}
public void allows_with_exactly_enough_file_to_reach_checksum() {
    // One full chunk exactly covers a checkpoint of 10000 (== chunk size).
    var cfg = new TFChunkDbConfig(PathName,
        new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        10000, 0, new InMemoryCheckpoint(10000), new ICheckpoint[0]);
    var chunkDb = new TFChunkDb(cfg);
    CreateChunk(Path.Combine(PathName, cfg.FileNamingStrategy.GetFilenameFor(0)), cfg.ChunkSize, cfg.ChunkSize);
    Assert.DoesNotThrow(chunkDb.OpenVerifyAndClean);
    chunkDb.Dispose();
}
public void a_record_can_be_written() {
    // Lay down a complete chunk file, open the db with the writer checkpoint
    // 137 bytes into it, write one small prepare record via the log-format
    // record factory and verify it round-trips from the chunk file.
    var filename = GetFilePathFor("chunk-000000.000000");
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, chunkId: Guid.NewGuid());
    var chunkBytes = chunkHeader.AsByteArray();
    var bytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
    Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, bytes);
    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint()));
    db.Open();
    var recordFactory = LogFormatHelper <TLogFormat, TStreamId> .RecordFactory;
    var streamId = LogFormatHelper <TLogFormat, TStreamId> .StreamId;
    var eventTypeId = LogFormatHelper <TLogFormat, TStreamId> .EventTypeId;
    var tf = new TFChunkWriter(db);
    var record = LogRecord.Prepare(
        factory: recordFactory, logPosition: _checkpoint.Read(), correlationId: _correlationId, eventId: _eventId,
        expectedVersion: 1234, transactionPos: 0, transactionOffset: 0, eventStreamId: streamId,
        timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.None, eventType: eventTypeId,
        data: new byte[] { 1, 2, 3, 4, 5 }, metadata: new byte[] { 7, 17 });
    long tmp;
    tf.Write(record, out tmp);
    tf.Close();
    db.Dispose();
    // The checkpoint must have advanced by exactly the framed record size.
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read()); //137 is fluff assigned to beginning of checkpoint
    // Re-read from disk: skip the chunk header, the 137 pre-existing bytes and
    // the record's 4-byte length prefix, then parse and compare.
    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader, (int)reader.BaseStream.Length);
        Assert.AreEqual(record, read);
    }
}
public void allows_next_new_chunk_when_checksum_is_exactly_in_between_two_chunks() {
    // Checkpoint 10000 sits exactly on the chunk boundary; a pre-created
    // chunk #1 after the boundary is acceptable.
    var cfg = new TFChunkDbConfig(PathName,
        new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        10000, 0, new InMemoryCheckpoint(10000), new ICheckpoint[0]);
    var chunkDb = new TFChunkDb(cfg);
    CreateChunk(Path.Combine(PathName, cfg.FileNamingStrategy.GetFilenameFor(0)), cfg.ChunkSize, cfg.ChunkSize);
    CreateChunk(Path.Combine(PathName, cfg.FileNamingStrategy.GetFilenameFor(1)), cfg.ChunkSize, cfg.ChunkSize);
    Assert.DoesNotThrow(chunkDb.OpenVerifyAndClean);
    chunkDb.Dispose();
}
public void with_not_enough_files_to_reach_checksum_throws() {
    // Checkpoint 15000 requires part of chunk #1, but only chunk #0 exists:
    // the open must fail with a missing-chunk error.
    var cfg = new TFChunkDbConfig(PathName,
        new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        10000, 0, new InMemoryCheckpoint(15000), new ICheckpoint[0]);
    var chunkDb = new TFChunkDb(cfg);
    CreateChunk(Path.Combine(PathName, cfg.FileNamingStrategy.GetFilenameFor(0)), cfg.ChunkSize, cfg.ChunkSize);
    var exception = Assert.Throws<CorruptDatabaseException>(chunkDb.OpenVerifyAndClean);
    Assert.IsInstanceOf<ChunkNotFoundException>(exception.InnerException);
    chunkDb.Dispose();
}