public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();

	ReadIndex.Close();
	ReadIndex.Dispose();
	TableIndex.Close(removeFiles: false);

	var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 2,
		() => new TFChunkReader(Db, WriterCheckpoint));
	var lowHasher = new XXHashUnsafe();
	var highHasher = new Murmur3AUnsafe();
	TableIndex = new TableIndex(GetFilePathFor("index"), lowHasher, highHasher,
		() => new HashListMemTable(PTableVersions.IndexV2, maxSize: MaxEntriesInMemTable * 2),
		() => new TFReaderLease(readers),
		PTableVersions.IndexV2,
		5,
		maxSizeForMemory: MaxEntriesInMemTable);

	ReadIndex = new ReadIndex(new NoopPublisher(),
		readers,
		TableIndex,
		0,
		additionalCommitChecks: true,
		metastreamMaxCount: 1,
		hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
		skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault,
		replicationCheckpoint: Db.Config.ReplicationCheckpoint);
	ReadIndex.Init(ChaserCheckpoint.Read());
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();

	if (TruncateCheckpoint == long.MinValue) {
		throw new InvalidOperationException("TruncateCheckpoint must be set in WriteTestScenario.");
	}

	OnBeforeTruncating();

	// need to close db before truncator can delete files
	ReadIndex.Close();
	ReadIndex.Dispose();
	TableIndex.Close(removeFiles: false);
	Db.Close();
	Db.Dispose();

	var truncator = new TFChunkDbTruncator(Db.Config);
	truncator.TruncateDb(TruncateCheckpoint);
}
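// Hedged sketch (not part of the fixtures in this section): a derived truncation test is
// expected to assign TruncateCheckpoint inside WriteTestScenario(), otherwise the guard in
// the async TestFixtureSetUp() above throws before truncation runs. The class name, the
// TruncateScenario base type, and the WriteSingleEvent helper (assumed to return a record
// exposing LogPosition) are illustrative assumptions only; they are not defined here.
public class when_truncating_before_the_last_event : TruncateScenario {
	protected override void WriteTestScenario() {
		WriteSingleEvent("ES", 0, "event that survives truncation");
		var chaff = WriteSingleEvent("ES", 1, "event expected to be truncated away");

		// Truncate everything from the second event onwards; assigning this value
		// satisfies the long.MinValue guard in TestFixtureSetUp().
		TruncateCheckpoint = chaff.LogPosition;
	}

	protected override void OnBeforeTruncating() {
		// Hook invoked by the fixture just before the db is closed and truncated,
		// e.g. to capture chunk file names for later assertions.
	}
}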
public override void TestFixtureTearDown() {
	TableIndex.ClearAll();

	ReadIndex.Close();
	ReadIndex.Dispose();

	Db.Close();
	Db.Dispose();

	base.TestFixtureTearDown();
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();

	ReadIndex.Close();
	ReadIndex.Dispose();

	Thread.Sleep(500);
	TableIndex.ClearAll(removeFiles: false);

	TableIndex = new TableIndex(Path.Combine(PathName, "index"),
		() => new HashListMemTable(),
		maxSizeForMemory: 5);
	TableIndex.Initialize();

	ReadIndex = new ReadIndex(new NoopPublisher(),
		2,
		() => new TFChunkSequentialReader(Db, WriterCheckpoint, 0),
		() => new TFChunkReader(Db, WriterCheckpoint),
		TableIndex,
		new ByLengthHasher());
	ReadIndex.Build();
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();

	ReadIndex.Close();
	ReadIndex.Dispose();
	TableIndex.Close(removeFiles: false);

	var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 2,
		() => new TFChunkReader(Db, WriterCheckpoint));
	var lowHasher = _logFormat.LowHasher;
	var highHasher = _logFormat.HighHasher;
	var emptyStreamId = _logFormat.EmptyStreamId;
	TableIndex = new TableIndex<TStreamId>(GetFilePathFor("index"), lowHasher, highHasher, emptyStreamId,
		() => new HashListMemTable(PTableVersions.IndexV2, maxSize: MaxEntriesInMemTable * 2),
		() => new TFReaderLease(readers),
		PTableVersions.IndexV2,
		5,
		Constants.PTableMaxReaderCountDefault,
		maxSizeForMemory: MaxEntriesInMemTable);

	var readIndex = new ReadIndex<TStreamId>(new NoopPublisher(),
		readers,
		TableIndex,
		_logFormat.StreamNameIndexConfirmer,
		_logFormat.StreamIds,
		_logFormat.StreamNamesProvider,
		_logFormat.EmptyStreamId,
		_logFormat.StreamIdValidator,
		_logFormat.StreamIdSizer,
		_logFormat.StreamExistenceFilter,
		_logFormat.StreamExistenceFilterReader,
		_logFormat.EventTypeIndexConfirmer,
		streamInfoCacheCapacity: 0,
		additionalCommitChecks: true,
		metastreamMaxCount: 1,
		hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
		skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault,
		replicationCheckpoint: Db.Config.ReplicationCheckpoint,
		indexCheckpoint: Db.Config.IndexCheckpoint);
	readIndex.IndexCommitter.Init(ChaserCheckpoint.Read());
	ReadIndex = readIndex;
}
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); ReadIndex.Close(); ReadIndex.Dispose(); TableIndex.ClearAll(removeFiles: false); TableIndex = new TableIndex(Path.Combine(PathName, "index"), () => new HashListMemTable(maxSize: 2000), maxSizeForMemory: MaxEntriesInMemTable); ReadIndex = new ReadIndex(new NoopPublisher(), 2, () => new TFChunkSequentialReader(Db, WriterChecksum, 0), () => new TFChunkReader(Db, WriterChecksum), TableIndex, new ByLengthHasher(), new NoLRUCache <string, StreamCacheInfo>()); ReadIndex.Build(); }
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); ReadIndex.Close(); ReadIndex.Dispose(); TableIndex.Close(removeFiles: false); var readers = new ObjectPool <ITransactionFileReader>("Readers", 2, 2, () => new TFChunkReader(Db, WriterCheckpoint)); TableIndex = new TableIndex(GetFilePathFor("index"), () => new HashListMemTable(maxSize: MaxEntriesInMemTable * 2), () => new TFReaderLease(readers), maxSizeForMemory: MaxEntriesInMemTable); ReadIndex = new ReadIndex(new NoopPublisher(), readers, TableIndex, new ByLengthHasher(), 0, additionalCommitChecks: true, metastreamMaxCount: 1); ReadIndex.Init(ChaserCheckpoint.Read()); }
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); ReadIndex.Close(); ReadIndex.Dispose(); TableIndex.Close(removeFiles: false); TableIndex = new TableIndex(GetFilePathFor("index"), () => new HashListMemTable(maxSize: MaxEntriesInMemTable * 2), maxSizeForMemory: MaxEntriesInMemTable); ReadIndex = new ReadIndex(new NoopPublisher(), 2, 2, () => new TFChunkReader(Db, WriterCheckpoint, 0), TableIndex, new ByLengthHasher(), new NoLRUCache <string, StreamCacheInfo>(), additionalCommitChecks: true, metastreamMaxCount: 1); ReadIndex.Init(WriterCheckpoint.Read(), ChaserCheckpoint.Read()); }