public void CreateDb(params Rec[] records) {
	if (DbRes != null) {
		DbRes.Db.Close();
	}

	var indexDirectory = GetFilePathFor("index");
	_logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormatFactory.Create(new() {
		IndexDirectory = indexDirectory,
	});

	var dbConfig = TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 1024 * 1024);
	var dbHelper = new TFChunkDbCreationHelper<TLogFormat, TStreamId>(dbConfig, _logFormat);

	DbRes = dbHelper.Chunk(records).CreateDb();

	// Bring the chaser checkpoint up to the writer checkpoint so the
	// pre-created log appears fully chased before the index is initialised.
	DbRes.Db.Config.WriterCheckpoint.Flush();
	DbRes.Db.Config.ChaserCheckpoint.Write(DbRes.Db.Config.WriterCheckpoint.Read());
	DbRes.Db.Config.ChaserCheckpoint.Flush();

	var readers = new ObjectPool<ITransactionFileReader>(
		"Readers", 2, 2, () => new TFChunkReader(DbRes.Db, DbRes.Db.Config.WriterCheckpoint));

	var lowHasher = _logFormat.LowHasher;
	var highHasher = _logFormat.HighHasher;
	var emptyStreamId = _logFormat.EmptyStreamId;
	TableIndex = new TableIndex<TStreamId>(indexDirectory, lowHasher, highHasher, emptyStreamId,
		() => new HashListMemTable(PTableVersions.IndexV3, MaxEntriesInMemTable * 2),
		() => new TFReaderLease(readers),
		PTableVersions.IndexV3, int.MaxValue,
		Constants.PTableMaxReaderCountDefault,
		MaxEntriesInMemTable);
	_logFormat.StreamNamesProvider.SetTableIndex(TableIndex);

	var readIndex = new ReadIndex<TStreamId>(new NoopPublisher(), readers, TableIndex,
		_logFormat.StreamNameIndexConfirmer,
		_logFormat.StreamIds,
		_logFormat.StreamNamesProvider,
		_logFormat.EmptyStreamId,
		_logFormat.StreamIdValidator,
		_logFormat.StreamIdSizer,
		_logFormat.StreamExistenceFilter,
		_logFormat.StreamExistenceFilterReader,
		0,
		additionalCommitChecks: true,
		metastreamMaxCount: _metastreamMaxCount,
		hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
		skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault,
		replicationCheckpoint: DbRes.Db.Config.ReplicationCheckpoint,
		indexCheckpoint: DbRes.Db.Config.IndexCheckpoint);

	readIndex.IndexCommitter.Init(DbRes.Db.Config.ChaserCheckpoint.Read());
	ReadIndex = readIndex;
}
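// Illustrative call, assuming the Rec test-record factory used by
// TFChunkDbCreationHelper elsewhere in these fixtures (the exact helper
// names below are hypothetical, not confirmed by this file):
//
//   CreateDb(
//       Rec.Prepare(0, "test-stream"),
//       Rec.Commit(0, "test-stream"));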
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();

	var dbConfig = TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 1024 * 1024);
	var dbCreationHelper = new TFChunkDbCreationHelper(dbConfig);

	_dbResult = dbCreationHelper
		.Chunk().CompleteLastChunk()
		.Chunk().CompleteLastChunk()
		.Chunk()
		.CreateDb();

	_dbResult.Db.Config.WriterCheckpoint.Flush();
	_dbResult.Db.Config.ChaserCheckpoint.Write(_dbResult.Db.Config.WriterCheckpoint.Read());
	_dbResult.Db.Config.ChaserCheckpoint.Flush();

	Log = new FakeTFScavengerLog();
	FakeTableIndex = new FakeTableIndex();
	TfChunkScavenger = new TFChunkScavenger(_dbResult.Db, Log, FakeTableIndex, new FakeReadIndex(_ => false));

	try {
		await When().WithTimeout(TimeSpan.FromMinutes(1));
	} catch (Exception ex) {
		throw new Exception("When Failed", ex);
	}
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();

	var indexDirectory = GetFilePathFor("index");
	_logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormatFactory.Create(new() {
		IndexDirectory = indexDirectory,
	});

	var dbConfig = TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 1024 * 1024);
	var dbCreationHelper = new TFChunkDbCreationHelper<TLogFormat, TStreamId>(dbConfig, _logFormat);

	_dbResult = dbCreationHelper
		.Chunk().CompleteLastChunk()
		.Chunk().CompleteLastChunk()
		.Chunk()
		.CreateDb();

	_dbResult.Db.Config.WriterCheckpoint.Flush();
	_dbResult.Db.Config.ChaserCheckpoint.Write(_dbResult.Db.Config.WriterCheckpoint.Read());
	_dbResult.Db.Config.ChaserCheckpoint.Flush();

	Log = new FakeTFScavengerLog();
	FakeTableIndex = new FakeTableIndex<TStreamId>();
	TfChunkScavenger = new TFChunkScavenger<TStreamId>(_dbResult.Db, Log, FakeTableIndex,
		new FakeReadIndex<TLogFormat, TStreamId>(_ => false, _logFormat.Metastreams),
		_logFormat.Metastreams);

	try {
		await When().WithTimeout(TimeSpan.FromMinutes(1));
	} catch (Exception ex) {
		throw new Exception("When Failed", ex);
	}
}
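// The file-level tests below rely on the on-disk chunk naming scheme
// "chunk-<chunk number>.<version>" (e.g. chunk-000001.000003 is version 3
// of chunk 1). As the next test asserts, opening the database keeps only
// the highest version of each chunk and removes the older ones.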
public void old_version_of_chunks_are_removed() {
	File.Create(GetFilePathFor("foo")).Close();
	File.Create(GetFilePathFor("bla")).Close();

	var config = TFChunkHelper.CreateSizedDbConfig(PathName, 450, chunkSize: 100);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000002"));
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000005"));
		DbUtil.CreateSingleChunk(config, 1, GetFilePathFor("chunk-000001.000000"));
		DbUtil.CreateMultiChunk(config, 1, 3, GetFilePathFor("chunk-000001.000001"));
		DbUtil.CreateSingleChunk(config, 2, GetFilePathFor("chunk-000002.000000"));
		DbUtil.CreateSingleChunk(config, 3, GetFilePathFor("chunk-000003.000000"));
		DbUtil.CreateSingleChunk(config, 3, GetFilePathFor("chunk-000003.000001"));
		DbUtil.CreateSingleChunk(config, 4, GetFilePathFor("chunk-000004.000007"));
		DbUtil.CreateOngoingChunk(config, 4, GetFilePathFor("chunk-000004.000008"));

		Assert.DoesNotThrow(() => db.Open(verifyHash: false));

		// Only the highest version of each chunk survives; unrelated files are untouched.
		Assert.IsTrue(File.Exists(GetFilePathFor("foo")));
		Assert.IsTrue(File.Exists(GetFilePathFor("bla")));
		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000005")));
		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000001.000001")));
		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000004.000008")));
		Assert.AreEqual(5, Directory.GetFiles(PathName, "*").Length);
	}
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();

	var indexDirectory = GetFilePathFor("index");
	_logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormatFactory.Create(new() {
		IndexDirectory = indexDirectory,
	});

	var dbConfig = TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 1024 * 1024);
	var dbCreationHelper = new TFChunkDbCreationHelper<TLogFormat, TStreamId>(dbConfig, _logFormat);

	_dbResult = CreateDb(dbCreationHelper);
	_keptRecords = KeptRecords(_dbResult);

	_dbResult.Db.Config.WriterCheckpoint.Flush();
	_dbResult.Db.Config.ChaserCheckpoint.Write(_dbResult.Db.Config.WriterCheckpoint.Read());
	_dbResult.Db.Config.ChaserCheckpoint.Flush();

	var readerPool = new ObjectPool<ITransactionFileReader>(
		"ReadIndex readers pool",
		Constants.PTableInitialReaderCount,
		Constants.PTableMaxReaderCountDefault,
		() => new TFChunkReader(_dbResult.Db, _dbResult.Db.Config.WriterCheckpoint));

	var lowHasher = _logFormat.LowHasher;
	var highHasher = _logFormat.HighHasher;
	var emptyStreamId = _logFormat.EmptyStreamId;
	var tableIndex = new TableIndex<TStreamId>(indexDirectory, lowHasher, highHasher, emptyStreamId,
		() => new HashListMemTable(PTableVersions.IndexV3, maxSize: 200),
		() => new TFReaderLease(readerPool),
		PTableVersions.IndexV3, 5,
		Constants.PTableMaxReaderCountDefault,
		maxSizeForMemory: 100,
		maxTablesPerLevel: 2);
	_logFormat.StreamNamesProvider.SetTableIndex(tableIndex);

	var readIndex = new ReadIndex<TStreamId>(new NoopPublisher(), readerPool, tableIndex,
		_logFormat.StreamNameIndexConfirmer,
		_logFormat.StreamIds,
		_logFormat.StreamNamesProvider,
		_logFormat.EmptyStreamId,
		_logFormat.StreamIdValidator,
		_logFormat.StreamIdSizer,
		_logFormat.StreamExistenceFilter,
		_logFormat.StreamExistenceFilterReader,
		_logFormat.EventTypeIndexConfirmer,
		100, true, _metastreamMaxCount,
		Opts.HashCollisionReadLimitDefault, Opts.SkipIndexScanOnReadsDefault,
		_dbResult.Db.Config.ReplicationCheckpoint,
		_dbResult.Db.Config.IndexCheckpoint);

	readIndex.IndexCommitter.Init(_dbResult.Db.Config.WriterCheckpoint.Read());
	ReadIndex = readIndex;

	var scavenger = new TFChunkScavenger<TStreamId>(_dbResult.Db, new FakeTFScavengerLog(), tableIndex, ReadIndex,
		_logFormat.Metastreams, unsafeIgnoreHardDeletes: UnsafeIgnoreHardDelete());
	await scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false);
}
public void detect_no_database() {
	var config = TFChunkHelper.CreateSizedDbConfig(PathName, 4000, chunkSize: 1000);
	using (var db = new TFChunkDb(config)) {
		Assert.That(() => db.Open(verifyHash: false),
			Throws.Exception.InstanceOf<CorruptDatabaseException>()
				.With.InnerException.InstanceOf<ChunkNotFoundException>());
	}
}
public void does_not_allow_checkpoint_to_point_into_the_middle_of_multichunk_chunk() {
	var config = TFChunkHelper.CreateSizedDbConfig(PathName, 1500, chunkSize: 1000);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
		DbUtil.CreateMultiChunk(config, 1, 10, GetFilePathFor("chunk-000001.000001"));

		Assert.That(() => db.Open(verifyHash: false),
			Throws.Exception.InstanceOf<CorruptDatabaseException>()
				.With.InnerException.InstanceOf<BadChunkInDatabaseException>());
	}
}
public void does_not_allow_not_completed_not_last_chunks() {
	var config = TFChunkHelper.CreateSizedDbConfig(PathName, 4000, chunkSize: 1000);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
		DbUtil.CreateOngoingChunk(config, 1, GetFilePathFor("chunk-000001.000000"));
		DbUtil.CreateOngoingChunk(config, 2, GetFilePathFor("chunk-000002.000000"));
		DbUtil.CreateOngoingChunk(config, 3, GetFilePathFor("chunk-000003.000000"));

		Assert.That(() => db.Open(verifyHash: false),
			Throws.Exception.InstanceOf<CorruptDatabaseException>()
				.With.InnerException.InstanceOf<BadChunkInDatabaseException>());
	}
}
public void when_checkpoint_is_on_boundary_of_new_chunk_and_last_chunk_is_truncated_but_not_completed_exception_is_thrown() {
	var config = TFChunkHelper.CreateSizedDbConfig(PathName, 200, chunkSize: 100);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
		DbUtil.CreateOngoingChunk(config, 1, GetFilePathFor("chunk-000001.000001"), actualSize: config.ChunkSize - 10);

		Assert.That(() => db.Open(verifyHash: false),
			Throws.Exception.InstanceOf<CorruptDatabaseException>()
				.With.InnerException.InstanceOf<BadChunkInDatabaseException>());
	}
}
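// Contrast with the next test: a truncated ongoing chunk sitting at the
// checkpoint boundary is treated as corruption (BadChunkInDatabaseException
// above), whereas a chunk that is simply absent at an exact boundary is
// recreated as a fresh ongoing chunk when the database opens.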
public void when_checkpoint_is_exactly_on_the_boundary_of_chunk_the_last_chunk_could_be_not_present_but_should_be_created() {
	var config = TFChunkHelper.CreateSizedDbConfig(PathName, 200, chunkSize: 100);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateMultiChunk(config, 0, 1, GetFilePathFor("chunk-000000.000001"));

		Assert.DoesNotThrow(() => db.Open(verifyHash: false));
		Assert.IsNotNull(db.Manager.GetChunk(2));

		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000001")));
		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000002.000000")));
		Assert.AreEqual(2, Directory.GetFiles(PathName, "*").Length);
	}
}
public void allows_checkpoint_to_point_into_the_middle_of_completed_chunk_when_enough_actual_data_in_chunk() {
	var config = TFChunkHelper.CreateSizedDbConfig(PathName, 1500, chunkSize: 1000);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
		DbUtil.CreateSingleChunk(config, 1, GetFilePathFor("chunk-000001.000001"), actualDataSize: 500);

		Assert.DoesNotThrow(() => db.Open(verifyHash: false));

		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000001.000001")));
		Assert.AreEqual(2, Directory.GetFiles(PathName, "*").Length);
	}
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();

	var dbConfig = TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 1024 * 1024);
	var dbCreationHelper = new TFChunkDbCreationHelper<TLogFormat, TStreamId>(dbConfig);
	DbRes = CreateDb(dbCreationHelper);

	DbRes.Db.Config.WriterCheckpoint.Flush();
	DbRes.Db.Config.ChaserCheckpoint.Write(DbRes.Db.Config.WriterCheckpoint.Read());
	DbRes.Db.Config.ChaserCheckpoint.Flush();

	var logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormat;

	var readers = new ObjectPool<ITransactionFileReader>(
		"Readers", 2, 2, () => new TFChunkReader(DbRes.Db, DbRes.Db.Config.WriterCheckpoint));

	var lowHasher = logFormat.LowHasher;
	var highHasher = logFormat.HighHasher;
	var emptyStreamId = logFormat.EmptyStreamId;
	TableIndex = new TableIndex<TStreamId>(GetFilePathFor("index"), lowHasher, highHasher, emptyStreamId,
		() => new HashListMemTable(PTableVersions.IndexV2, MaxEntriesInMemTable * 2),
		() => new TFReaderLease(readers),
		PTableVersions.IndexV2, int.MaxValue,
		Constants.PTableMaxReaderCountDefault,
		MaxEntriesInMemTable);

	var readIndex = new ReadIndex<TStreamId>(new NoopPublisher(), readers, TableIndex,
		logFormat.StreamIds,
		logFormat.StreamNamesProvider,
		logFormat.EmptyStreamId,
		logFormat.StreamIdValidator,
		logFormat.StreamIdSizer,
		0,
		additionalCommitChecks: true,
		metastreamMaxCount: _metastreamMaxCount,
		hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
		skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault,
		replicationCheckpoint: DbRes.Db.Config.ReplicationCheckpoint,
		indexCheckpoint: DbRes.Db.Config.IndexCheckpoint);

	readIndex.IndexCommitter.Init(DbRes.Db.Config.ChaserCheckpoint.Read());
	ReadIndex = new TestReadIndex<TStreamId>(readIndex, logFormat.StreamNameIndex);
}
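// In the tests below the checkpoint is a global byte position in the log:
// with chunkSize 1000, a checkpoint of 4000 sits exactly at the start of
// chunk 4, so a multichunk covering chunks 1-3 is a valid last completed
// chunk and a fresh chunk-000004.000000 is created on open.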
public void allows_last_chunk_to_be_multichunk_when_checkpoint_point_at_the_start_of_next_chunk() {
	var config = TFChunkHelper.CreateSizedDbConfig(PathName, 4000, chunkSize: 1000);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
		DbUtil.CreateMultiChunk(config, 1, 3, GetFilePathFor("chunk-000001.000001"));

		Assert.DoesNotThrow(() => db.Open(verifyHash: false));

		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000001.000001")));
		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000004.000000")));
		Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);
	}
}
public void when_checkpoint_is_on_boundary_of_chunk_last_chunk_is_preserved() {
	var config = TFChunkHelper.CreateSizedDbConfig(PathName, 200, chunkSize: 100);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
		DbUtil.CreateSingleChunk(config, 1, GetFilePathFor("chunk-000001.000001"));
		DbUtil.CreateOngoingChunk(config, 2, GetFilePathFor("chunk-000002.000005"));

		Assert.DoesNotThrow(() => db.Open(verifyHash: false));

		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000001.000001")));
		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000002.000005")));
		Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);
	}
}
public void when_checkpoint_is_on_boundary_of_new_chunk_and_last_chunk_is_truncated_no_exception_is_thrown() {
	var config = TFChunkHelper.CreateSizedDbConfig(PathName, 300, chunkSize: 100);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
		DbUtil.CreateMultiChunk(config, 1, 2, GetFilePathFor("chunk-000001.000001"), physicalSize: 50, logicalSize: 150);

		Assert.DoesNotThrow(() => db.Open(verifyHash: false));
		Assert.IsNotNull(db.Manager.GetChunk(2));

		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000001.000001")));
		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000003.000000")));
		Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);
	}
}
public void CreateDb(params Rec[] records) {
	if (DbRes != null) {
		DbRes.Db.Close();
	}

	var dbConfig = TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 1024 * 1024);
	var dbHelper = new TFChunkDbCreationHelper(dbConfig);

	DbRes = dbHelper.Chunk(records).CreateDb();

	DbRes.Db.Config.WriterCheckpoint.Flush();
	DbRes.Db.Config.ChaserCheckpoint.Write(DbRes.Db.Config.WriterCheckpoint.Read());
	DbRes.Db.Config.ChaserCheckpoint.Flush();

	var readers = new ObjectPool<ITransactionFileReader>(
		"Readers", 2, 2, () => new TFChunkReader(DbRes.Db, DbRes.Db.Config.WriterCheckpoint));

	var lowHasher = new XXHashUnsafe();
	var highHasher = new Murmur3AUnsafe();
	TableIndex = new TableIndex(GetFilePathFor("index"), lowHasher, highHasher,
		() => new HashListMemTable(PTableVersions.IndexV3, MaxEntriesInMemTable * 2),
		() => new TFReaderLease(readers),
		PTableVersions.IndexV3, int.MaxValue,
		Constants.PTableMaxReaderCountDefault,
		MaxEntriesInMemTable);

	ReadIndex = new ReadIndex(new NoopPublisher(), readers, TableIndex, 0,
		additionalCommitChecks: true,
		metastreamMaxCount: _metastreamMaxCount,
		hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
		skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault,
		replicationCheckpoint: DbRes.Db.Config.ReplicationCheckpoint,
		indexCheckpoint: DbRes.Db.Config.IndexCheckpoint);

	((ReadIndex)ReadIndex).IndexCommitter.Init(DbRes.Db.Config.ChaserCheckpoint.Read());
}
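// "*.tmp" and "*.scavenge.tmp" files are leftovers from interrupted writes
// or scavenges and are deleted when the database opens; unrecognised
// non-temp files (e.g. "bla") are left in place, as the next test verifies.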
public void temporary_files_are_removed() {
	var config = TFChunkHelper.CreateSizedDbConfig(PathName, 150, chunkSize: 100);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
		DbUtil.CreateOngoingChunk(config, 1, GetFilePathFor("chunk-000001.000001"));

		File.Create(GetFilePathFor("bla")).Close();
		File.Create(GetFilePathFor("bla.scavenge.tmp")).Close();
		File.Create(GetFilePathFor("bla.tmp")).Close();

		Assert.DoesNotThrow(() => db.Open(verifyHash: false));

		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000001.000001")));
		Assert.IsTrue(File.Exists(GetFilePathFor("bla")));
		Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);
	}
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();

	var dbConfig = TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 1024 * 1024);
	var dbCreationHelper = new TFChunkDbCreationHelper(dbConfig);

	_dbResult = CreateDb(dbCreationHelper);
	_keptRecords = KeptRecords(_dbResult);

	_dbResult.Db.Config.WriterCheckpoint.Flush();
	_dbResult.Db.Config.ChaserCheckpoint.Write(_dbResult.Db.Config.WriterCheckpoint.Read());
	_dbResult.Db.Config.ChaserCheckpoint.Flush();

	var indexPath = Path.Combine(PathName, "index");
	var readerPool = new ObjectPool<ITransactionFileReader>(
		"ReadIndex readers pool",
		Constants.PTableInitialReaderCount,
		Constants.PTableMaxReaderCountDefault,
		() => new TFChunkReader(_dbResult.Db, _dbResult.Db.Config.WriterCheckpoint));

	var lowHasher = new XXHashUnsafe();
	var highHasher = new Murmur3AUnsafe();
	var tableIndex = new TableIndex(indexPath, lowHasher, highHasher,
		() => new HashListMemTable(PTableVersions.IndexV3, maxSize: 200),
		() => new TFReaderLease(readerPool),
		PTableVersions.IndexV3, 5,
		Constants.PTableMaxReaderCountDefault,
		maxSizeForMemory: 100,
		maxTablesPerLevel: 2);

	ReadIndex = new ReadIndex(new NoopPublisher(), readerPool, tableIndex,
		100, true, _metastreamMaxCount,
		Opts.HashCollisionReadLimitDefault, Opts.SkipIndexScanOnReadsDefault,
		_dbResult.Db.Config.ReplicationCheckpoint,
		_dbResult.Db.Config.IndexCheckpoint);
	((ReadIndex)ReadIndex).IndexCommitter.Init(_dbResult.Db.Config.WriterCheckpoint.Read());

	var scavenger = new TFChunkScavenger(_dbResult.Db, new FakeTFScavengerLog(), tableIndex, ReadIndex,
		unsafeIgnoreHardDeletes: UnsafeIgnoreHardDelete());
	await scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false);
}