public void try_read_does_not_cache_anything_and_returns_record_once_it_is_written_later() {
    var writerchk = new InMemoryCheckpoint(0);
    var chaserchk = new InMemoryCheckpoint(0);
    var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));
    db.Open();

    var writer = new TFChunkWriter(db);
    writer.Open();

    var reader = new TFChunkReader(db, writerchk, 0);
    Assert.IsFalse(reader.TryReadNext().Success);

    var rec = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "ES", -1, "ET", new byte[] { 7 }, null);
    long tmp;
    Assert.IsTrue(writer.Write(rec, out tmp));
    writer.Flush();
    writer.Close();

    var res = reader.TryReadNext();
    Assert.IsTrue(res.Success);
    Assert.AreEqual(rec, res.LogRecord);

    db.Close();
}
public void try_read_does_not_cache_anything_and_returns_record_once_it_is_written_later() {
    var writerchk = new InMemoryCheckpoint(0);
    var db = new TFChunkDb(new TFChunkDbConfig(
        PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000, 0, writerchk, new InMemoryCheckpoint()));
    db.OpenVerifyAndClean();

    var writer = new TFChunkWriter(db);
    writer.Open();

    var reader = new TFChunkSequentialReader(db, writerchk, 0);
    LogRecord record;
    Assert.IsFalse(reader.TryReadNext(out record));

    var rec = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "ES", -1, "ET", new byte[] { 7 }, null);
    long tmp;
    Assert.IsTrue(writer.Write(rec, out tmp));
    writer.Flush();
    writer.Close();

    Assert.IsTrue(reader.TryReadNext(out record));
    Assert.AreEqual(rec, record);

    reader.Close();
    db.Close();
}
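// Note: the two variants above exercise the same scenario against different API generations.
// The first appears to target the newer surface (TFChunkHelper.CreateDbConfig, db.Open(),
// TFChunkReader returning a result with .Success/.LogRecord), while the second uses the older
// one (a hand-built TFChunkDbConfig, OpenVerifyAndClean(), TFChunkSequentialReader with an
// out LogRecord parameter).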
public override void TestFixtureSetUp() {
    base.TestFixtureSetUp();
    WriterCheckpoint = new InMemoryCheckpoint(0);
    var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
    Db = new TFChunkDb(new TFChunkDbConfig(
        PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000, 0, WriterCheckpoint, new[] { chaserchk }));
    Db.OpenVerifyAndClean(); // create db

    Writer = new TFChunkWriter(Db);
    Writer.Open();
    WriteTestScenario();
    Writer.Close();
    Writer = null;

    WriterCheckpoint.Flush();
    chaserchk.Write(WriterCheckpoint.Read());
    chaserchk.Flush();

    TableIndex = new TableIndex(Path.Combine(PathName, "index"), () => new HashListMemTable(), _maxEntriesInMemTable);
    TableIndex.Initialize();

    var reader = new TFChunkReader(Db, Db.Config.WriterCheckpoint);
    ReadIndex = new ReadIndex(new NoopPublisher(), 2,
        () => new TFChunkSequentialReader(Db, Db.Config.WriterCheckpoint, 0),
        () => reader, TableIndex, new ByLengthHasher());
    ReadIndex.Build();
}
public override async Task TestFixtureSetUp() {
    await base.TestFixtureSetUp();
    WriterCheckpoint = new InMemoryCheckpoint(0);
    ChaserCheckpoint = new InMemoryCheckpoint(0);
    ReplicationCheckpoint = new InMemoryCheckpoint(-1);
    Db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, WriterCheckpoint, ChaserCheckpoint,
        replicationCheckpoint: ReplicationCheckpoint));
    Db.Open(); // create db

    Writer = new TFChunkWriter(Db);
    Writer.Open();
    WriteTestScenario();
    Writer.Close();
    Writer = null;

    WriterCheckpoint.Flush();
    ChaserCheckpoint.Write(WriterCheckpoint.Read());
    ChaserCheckpoint.Flush();

    var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 5,
        () => new TFChunkReader(Db, Db.Config.WriterCheckpoint));
    var lowHasher = new XXHashUnsafe();
    var highHasher = new Murmur3AUnsafe();
    TableIndex = new TableIndex(GetFilePathFor("index"), lowHasher, highHasher,
        () => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2),
        () => new TFReaderLease(readers),
        IndexBitnessVersion, int.MaxValue, Constants.PTableMaxReaderCountDefault, MaxEntriesInMemTable);
    ReadIndex = new ReadIndex(new NoopPublisher(), readers, TableIndex, 0,
        additionalCommitChecks: PerformAdditionalCommitChecks,
        metastreamMaxCount: MetastreamMaxCount,
        hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
        skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault,
        replicationCheckpoint: Db.Config.ReplicationCheckpoint);
    ReadIndex.Init(ChaserCheckpoint.Read());

    // scavenge must run after readIndex is built
    if (_scavenge) {
        if (_completeLastChunkOnScavenge) {
            Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete();
        }
        _scavenger = new TFChunkScavenger(Db, new FakeTFScavengerLog(), TableIndex, ReadIndex);
        await _scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks);
    }
}
public override void TestFixtureSetUp() {
    base.TestFixtureSetUp();
    WriterCheckpoint = new InMemoryCheckpoint(0);
    ChaserCheckpoint = new InMemoryCheckpoint(0);
    Db = new TFChunkDb(new TFChunkDbConfig(
        PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000, 0, WriterCheckpoint, ChaserCheckpoint,
        new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1)));
    Db.Open(); // create db

    Writer = new TFChunkWriter(Db);
    Writer.Open();
    WriteTestScenario();
    Writer.Close();
    Writer = null;

    WriterCheckpoint.Flush();
    ChaserCheckpoint.Write(WriterCheckpoint.Read());
    ChaserCheckpoint.Flush();

    var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 2,
        () => new TFChunkReader(Db, Db.Config.WriterCheckpoint));
    TableIndex = new TableIndex(GetFilePathFor("index"),
        () => new HashListMemTable(MaxEntriesInMemTable * 2),
        () => new TFReaderLease(readers),
        MaxEntriesInMemTable);
    var hasher = new ByLengthHasher();
    ReadIndex = new ReadIndex(new NoopPublisher(), readers, TableIndex, hasher, 0,
        additionalCommitChecks: true,
        metastreamMaxCount: MetastreamMaxCount);
    ReadIndex.Init(ChaserCheckpoint.Read());

    // scavenge must run after readIndex is built
    if (_scavenge) {
        if (_completeLastChunkOnScavenge) {
            Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete();
        }
        _scavenger = new TFChunkScavenger(Db, TableIndex, hasher, ReadIndex);
        _scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks);
    }
}
public override void TestFixtureSetUp() {
    base.TestFixtureSetUp();
    string dbPath = Path.Combine(PathName, string.Format("mini-node-db-{0}", Guid.NewGuid()));

    Bus = new InMemoryBus("bus");
    IODispatcher = new IODispatcher(Bus, new PublishEnvelope(Bus));

    if (!Directory.Exists(dbPath)) {
        Directory.CreateDirectory(dbPath);
    }

    var writerCheckFilename = Path.Combine(dbPath, Checkpoint.Writer + ".chk");
    var chaserCheckFilename = Path.Combine(dbPath, Checkpoint.Chaser + ".chk");
    if (Runtime.IsMono) {
        WriterCheckpoint = new FileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
        ChaserCheckpoint = new FileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);
    } else {
        WriterCheckpoint = new MemoryMappedFileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
        ChaserCheckpoint = new MemoryMappedFileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);
    }

    Db = new TFChunkDb(new TFChunkDbConfig(
        dbPath, new VersionedPatternFileNamingStrategy(dbPath, "chunk-"),
        TFConsts.ChunkSize, 0, WriterCheckpoint, ChaserCheckpoint,
        new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1), inMemDb: false));
    Db.Open(); // create DB

    Writer = new TFChunkWriter(Db);
    Writer.Open();
    WriteTestScenario();
    Writer.Close();
    Writer = null;

    WriterCheckpoint.Flush();
    ChaserCheckpoint.Write(WriterCheckpoint.Read());
    ChaserCheckpoint.Flush();
    Db.Close();

    // start node with our created DB
    Node = new MiniNode(PathName, inMemDb: false, dbPath: dbPath);
    Node.Start();

    Given();
}
public override async Task TestFixtureSetUp() {
    await base.TestFixtureSetUp();
    string dbPath = Path.Combine(PathName, string.Format("mini-node-db-{0}", Guid.NewGuid()));

    _logFormatFactory = LogFormatHelper<TLogFormat, TStreamId>.LogFormatFactory.Create(new() {
        IndexDirectory = GetFilePathFor("index"),
    });

    Bus = new InMemoryBus("bus");
    IODispatcher = new IODispatcher(Bus, new PublishEnvelope(Bus));

    if (!Directory.Exists(dbPath)) {
        Directory.CreateDirectory(dbPath);
    }

    var writerCheckFilename = Path.Combine(dbPath, Checkpoint.Writer + ".chk");
    var chaserCheckFilename = Path.Combine(dbPath, Checkpoint.Chaser + ".chk");
    WriterCheckpoint = new MemoryMappedFileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
    ChaserCheckpoint = new MemoryMappedFileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);

    Db = new TFChunkDb(TFChunkHelper.CreateDbConfig(dbPath, WriterCheckpoint, ChaserCheckpoint, TFConsts.ChunkSize));
    Db.Open(); // create DB

    Writer = new TFChunkWriter(Db);
    Writer.Open();

    var pm = _logFormatFactory.CreatePartitionManager(
        reader: new TFChunkReader(Db, WriterCheckpoint),
        writer: Writer);
    pm.Initialize();

    WriteTestScenario();
    Writer.Close();
    Writer = null;

    WriterCheckpoint.Flush();
    ChaserCheckpoint.Write(WriterCheckpoint.Read());
    ChaserCheckpoint.Flush();
    Db.Close();

    // start node with our created DB
    Node = new MiniNode<TLogFormat, TStreamId>(PathName, inMemDb: false, dbPath: dbPath);
    await Node.Start();

    try {
        await Given().WithTimeout();
    } catch (Exception ex) {
        throw new Exception("Given Failed", ex);
    }
}
public void a_record_can_be_written() {
    var filename = GetFilePathFor("chunk-000000.000000");
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid());
    var chunkBytes = chunkHeader.AsByteArray();
    var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
    Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, buf);

    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint(),
        chunkSize: chunkHeader.ChunkSize));
    db.Open();

    var bytes = new byte[3994]; // this gives exactly 4097 size of record, with 3993 (rec size 4096) everything works fine!
    new Random().NextBytes(bytes);

    var writer = new TFChunkWriter(db);
    var logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormat;
    logFormat.StreamNameIndex.GetOrAddId("WorldEnding", out var streamId, out _, out _);
    var record = LogRecord.Prepare(
        factory: logFormat.RecordFactory, logPosition: 137,
        correlationId: _correlationId, eventId: _eventId,
        transactionPos: 789, transactionOffset: 543, eventStreamId: streamId,
        expectedVersion: 1234, timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.SingleWrite,
        eventType: "type", data: bytes, metadata: new byte[] { 0x07, 0x17 });

    long pos;
    Assert.IsTrue(writer.Write(record, out pos));
    writer.Close();
    db.Dispose();

    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());

    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader, (int)reader.BaseStream.Length);
        Assert.AreEqual(record, read);
    }
}
public void a_record_can_be_written() {
    var filename = Path.Combine(PathName, "prefix.tf0");
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
    var chunkBytes = chunkHeader.AsByteArray();
    var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
    Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, buf);

    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(
        PathName, new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        chunkHeader.ChunkSize, 0, _checkpoint, new ICheckpoint[0]));
    db.OpenVerifyAndClean();

    var writer = new TFChunkWriter(db);
    var record = new PrepareLogRecord(
        logPosition: 0, correlationId: _correlationId, eventId: _eventId,
        transactionPosition: 0, transactionOffset: 0, eventStreamId: "WorldEnding",
        expectedVersion: 1234, timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.None,
        eventType: "type", data: new byte[8000], metadata: new byte[] { 7, 17 });
    Console.WriteLine(record.GetSizeWithLengthPrefixAndSuffix());
    Console.WriteLine(record.GetSizeWithLengthPrefixAndSuffix() + 137);

    long pos;
    Assert.IsTrue(writer.Write(record, out pos));
    writer.Close();
    db.Dispose();

    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());

    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader);
        Console.WriteLine(string.Join("\n", Directory.EnumerateFiles(PathName)));
        Assert.AreEqual(record, read);
    }
}
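// Note: the two a_record_can_be_written variants above differ mainly in API generation. The
// first appears to be the newer form (GetFilePathFor("chunk-..."), TFChunkHelper.CreateDbConfig,
// LogRecord.Prepare with a record factory and a stream-id lookup, ReadFrom(reader, length)),
// while the second uses the older one (PrefixFileNamingStrategy, OpenVerifyAndClean(), a
// PrepareLogRecord built directly with a string stream id, ReadFrom(reader)). The
// Console.WriteLine calls in the second variant are diagnostic output kept from the original test.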
public override async Task TestFixtureSetUp() {
    await base.TestFixtureSetUp();
    string dbPath = Path.Combine(PathName, string.Format("mini-node-db-{0}", Guid.NewGuid()));

    Bus = new InMemoryBus("bus");
    IODispatcher = new IODispatcher(Bus, new PublishEnvelope(Bus));

    if (!Directory.Exists(dbPath)) {
        Directory.CreateDirectory(dbPath);
    }

    var writerCheckFilename = Path.Combine(dbPath, Checkpoint.Writer + ".chk");
    var chaserCheckFilename = Path.Combine(dbPath, Checkpoint.Chaser + ".chk");
    if (Runtime.IsMono) {
        WriterCheckpoint = new FileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
        ChaserCheckpoint = new FileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);
    } else {
        WriterCheckpoint = new MemoryMappedFileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
        ChaserCheckpoint = new MemoryMappedFileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);
    }

    Db = new TFChunkDb(TFChunkHelper.CreateDbConfig(dbPath, WriterCheckpoint, ChaserCheckpoint, TFConsts.ChunkSize));
    Db.Open(); // create DB

    Writer = new TFChunkWriter(Db);
    Writer.Open();
    WriteTestScenario();
    Writer.Close();
    Writer = null;

    WriterCheckpoint.Flush();
    ChaserCheckpoint.Write(WriterCheckpoint.Read());
    ChaserCheckpoint.Flush();
    Db.Close();

    // start node with our created DB
    Node = new MiniNode(PathName, inMemDb: false, dbPath: dbPath);
    await Node.Start();

    try {
        await Given().WithTimeout();
    } catch (Exception ex) {
        throw new Exception("Given Failed", ex);
    }
}
public void try_read_returns_record_when_record_bigger_than_internal_buffer() {
    var writerchk = new InMemoryCheckpoint(0);
    var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
    var db = new TFChunkDb(new TFChunkDbConfig(
        PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000, 0, writerchk, chaserchk,
        new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1)));
    db.Open();

    var recordToWrite = new PrepareLogRecord(
        logPosition: 0, correlationId: _correlationId, eventId: _eventId,
        transactionPosition: 0, transactionOffset: 0, eventStreamId: "WorldEnding",
        expectedVersion: 1234, timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.None,
        eventType: "type", data: new byte[9000], metadata: new byte[] { 7, 17 });
    var writer = new TFChunkWriter(db);
    writer.Open();
    long pos;
    Assert.IsTrue(writer.Write(recordToWrite, out pos));
    writer.Close();
    writerchk.Write(recordToWrite.GetSizeWithLengthPrefixAndSuffix());

    var reader = new TFChunkChaser(db, writerchk, chaserchk);
    reader.Open();
    LogRecord record;
    var readRecord = reader.TryReadNext(out record);
    reader.Close();

    Assert.IsTrue(readRecord);
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
    Assert.AreEqual(recordToWrite, record);

    db.Close();
}
public void a_record_can_be_written() {
    var filename = Path.Combine(PathName, "prefix.tf0");
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
    var chunkBytes = chunkHeader.AsByteArray();
    var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
    Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, buf);

    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(
        PathName, new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        chunkHeader.ChunkSize, 0, _checkpoint, new InMemoryCheckpoint(), new ICheckpoint[0]));
    db.OpenVerifyAndClean();

    var bytes = new byte[3994]; // this gives exactly 4097 size of record, with 3993 (rec size 4096) everything works fine!
    new Random().NextBytes(bytes);

    var writer = new TFChunkWriter(db);
    var record = new PrepareLogRecord(
        logPosition: 123, correlationId: _correlationId, eventId: _eventId,
        transactionPosition: 789, transactionOffset: 543, eventStreamId: "WorldEnding",
        expectedVersion: 1234, timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.SingleWrite,
        eventType: "type", data: bytes, metadata: new byte[] { 0x07, 0x17 });

    long pos;
    Assert.IsTrue(writer.Write(record, out pos));
    writer.Close();
    db.Dispose();

    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());

    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record, read);
    }
}
public void try_read_returns_record_when_record_bigger_than_internal_buffer() {
    var writerchk = new InMemoryCheckpoint(0);
    var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
    var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));
    db.Open();

    var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
    var streamId = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
    var eventTypeId = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;

    var recordToWrite = LogRecord.Prepare(
        factory: recordFactory, logPosition: 0,
        correlationId: _correlationId, eventId: _eventId,
        transactionPos: 0, transactionOffset: 0, eventStreamId: streamId,
        expectedVersion: 1234, timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.None,
        eventType: eventTypeId, data: new byte[9000], metadata: new byte[] { 7, 17 });
    var writer = new TFChunkWriter(db);
    writer.Open();
    long pos;
    Assert.IsTrue(writer.Write(recordToWrite, out pos));
    writer.Close();
    writerchk.Write(recordToWrite.GetSizeWithLengthPrefixAndSuffix());

    var reader = new TFChunkChaser(db, writerchk, chaserchk, false);
    reader.Open();
    ILogRecord record;
    var readRecord = reader.TryReadNext(out record);
    reader.Close();

    Assert.IsTrue(readRecord);
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
    Assert.AreEqual(recordToWrite, record);

    db.Close();
}
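// Note: this is the same "record bigger than internal buffer" scenario as two listings above,
// ported to what appears to be the newer API: the record is built via LogRecord.Prepare with a
// record factory and LogFormatHelper-provided stream/event-type ids, TryReadNext yields an
// ILogRecord, and TFChunkChaser takes an extra boolean constructor argument.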
public void a_record_can_be_written() {
    var filename = Path.Combine(PathName, "prefix.tf0");
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
    var chunkBytes = chunkHeader.AsByteArray();
    var bytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
    Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, bytes);

    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(
        PathName, new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        10000, 0, _checkpoint, new InMemoryCheckpoint(), new ICheckpoint[0]));
    db.OpenVerifyAndClean();

    var tf = new TFChunkWriter(db);
    var record = new PrepareLogRecord(
        logPosition: 0, correlationId: _correlationId, eventId: _eventId,
        expectedVersion: 1234, transactionPosition: 0, transactionOffset: 0,
        eventStreamId: "WorldEnding", timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.None,
        eventType: "type", data: new byte[] { 1, 2, 3, 4, 5 }, metadata: new byte[] { 7, 17 });
    long tmp;
    tf.Write(record, out tmp);
    //tf.Flush();
    tf.Close();
    db.Dispose();

    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read()); //137 is fluff assigned to beginning of checkpoint

    //TODO actually read the event
    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record, read);
    }
}
public void try_read_returns_record_when_writerchecksum_equal() {
    var writerchk = new InMemoryCheckpoint(0);
    var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
    var db = new TFChunkDb(new TFChunkDbConfig(
        PathName, new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        10000, 0, writerchk, new[] { chaserchk }));
    db.OpenVerifyAndClean();

    var recordToWrite = new PrepareLogRecord(
        logPosition: 0, correlationId: _correlationId, eventId: _eventId,
        transactionPosition: 0, eventStreamId: "WorldEnding",
        expectedVersion: 1234, timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.None,
        eventType: "type", data: new byte[] { 1, 2, 3, 4, 5 }, metadata: new byte[] { 7, 17 });
    var writer = new TFChunkWriter(db);
    writer.Open();
    long pos;
    Assert.IsTrue(writer.Write(recordToWrite, out pos));
    writer.Close();
    writerchk.Write(recordToWrite.GetSizeWithLengthPrefix());

    var chaser = new TFChunkChaser(db, writerchk, chaserchk);
    chaser.Open();
    LogRecord record;
    var readRecord = chaser.TryReadNext(out record);
    chaser.Close();

    Assert.IsTrue(readRecord);
    Assert.AreEqual(record.GetSizeWithLengthPrefix(), chaserchk.Read());
    Assert.AreEqual(recordToWrite, record);

    db.Close();
}
public override void TestFixtureSetUp() {
    base.TestFixtureSetUp();
    WriterChecksum = new InMemoryCheckpoint(0);
    ChaserChecksum = new InMemoryCheckpoint(0);
    Db = new TFChunkDb(new TFChunkDbConfig(
        PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000, 0, WriterChecksum, ChaserChecksum,
        new[] { WriterChecksum, ChaserChecksum }));
    Db.OpenVerifyAndClean(); // create db

    Writer = new TFChunkWriter(Db);
    Writer.Open();
    WriteTestScenario();
    Writer.Close();
    Writer = null;

    WriterChecksum.Flush();
    ChaserChecksum.Write(WriterChecksum.Read());
    ChaserChecksum.Flush();

    TableIndex = new TableIndex(Path.Combine(PathName, "index"),
        () => new HashListMemTable(MaxEntriesInMemTable * 2),
        MaxEntriesInMemTable);
    var reader = new TFChunkReader(Db, Db.Config.WriterCheckpoint);
    ReadIndex = new ReadIndex(new NoopPublisher(), 2,
        () => new TFChunkSequentialReader(Db, Db.Config.WriterCheckpoint, 0),
        () => reader, TableIndex, new ByLengthHasher(),
        new NoLRUCache<string, StreamCacheInfo>());
    ReadIndex.Build();

    // scavenge must run after readIndex is built
    if (_scavenge) {
        _scavenger = new TFChunkScavenger(Db, ReadIndex);
        _scavenger.Scavenge(alwaysKeepScavenged: true);
    }
}
public void a_record_can_be_written() {
    var filename = GetFilePathFor("chunk-000000.000000");
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, chunkId: Guid.NewGuid());
    var chunkBytes = chunkHeader.AsByteArray();
    var bytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
    Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, bytes);

    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint()));
    db.Open();

    var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
    var streamId = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
    var eventTypeId = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;

    var tf = new TFChunkWriter(db);
    var record = LogRecord.Prepare(
        factory: recordFactory, logPosition: _checkpoint.Read(),
        correlationId: _correlationId, eventId: _eventId,
        expectedVersion: 1234, transactionPos: 0, transactionOffset: 0, eventStreamId: streamId,
        timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.None,
        eventType: eventTypeId, data: new byte[] { 1, 2, 3, 4, 5 }, metadata: new byte[] { 7, 17 });
    long tmp;
    tf.Write(record, out tmp);
    tf.Close();
    db.Dispose();

    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read()); //137 is fluff assigned to beginning of checkpoint

    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader, (int)reader.BaseStream.Length);
        Assert.AreEqual(record, read);
    }
}
public void try_read_returns_record_when_writerchecksum_equal() {
    var writerchk = new InMemoryCheckpoint(0);
    var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
    var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));
    db.Open();

    _logFormat.StreamNameIndex.GetOrAddId("WorldEnding", out var streamId, out _, out _);
    var recordToWrite = LogRecord.Prepare(
        factory: _logFormat.RecordFactory, logPosition: 0,
        correlationId: _correlationId, eventId: _eventId,
        transactionPos: 0, transactionOffset: 0, eventStreamId: streamId,
        expectedVersion: 1234, timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.None,
        eventType: "type", data: new byte[] { 1, 2, 3, 4, 5 }, metadata: new byte[] { 7, 17 });
    var writer = new TFChunkWriter(db);
    writer.Open();
    long pos;
    Assert.IsTrue(writer.Write(recordToWrite, out pos));
    writer.Close();
    writerchk.Write(recordToWrite.GetSizeWithLengthPrefixAndSuffix());

    var chaser = new TFChunkChaser(db, writerchk, chaserchk, false);
    chaser.Open();
    ILogRecord record;
    var readRecord = chaser.TryReadNext(out record);
    chaser.Close();

    Assert.IsTrue(readRecord);
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
    Assert.AreEqual(recordToWrite, record);

    db.Close();
}
public void a_record_can_be_written() {
    _checkpoint = new InMemoryCheckpoint(0);
    var db = new TFChunkDb(new TFChunkDbConfig(
        PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        1000, 0, _checkpoint, new InMemoryCheckpoint(),
        new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1)));
    db.Open();

    var tf = new TFChunkWriter(db);
    tf.Open();
    var record = new PrepareLogRecord(
        logPosition: 0, correlationId: _correlationId, eventId: _eventId,
        transactionPosition: 0, transactionOffset: 0, eventStreamId: "WorldEnding",
        expectedVersion: 1234, timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.None,
        eventType: "type", data: new byte[] { 1, 2, 3, 4, 5 }, metadata: new byte[] { 7, 17 });
    long tmp;
    tf.Write(record, out tmp);
    tf.Close();
    db.Dispose();

    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), _checkpoint.Read());

    using (var filestream = File.Open(GetFilePathFor("chunk-000000.000000"), FileMode.Open, FileAccess.Read)) {
        filestream.Position = ChunkHeader.Size;
        var reader = new BinaryReader(filestream);
        reader.ReadInt32();
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record, read);
    }
}
public virtual void Handle(SystemMessage.StateChangeMessage message) {
    _vnodeState = message.State;

    switch (message.State) {
        case VNodeState.Leader: {
            _indexWriter.Reset();
            EpochManager.WriteNewEpoch(((SystemMessage.BecomeLeader)message).EpochNumber);
            break;
        }
        case VNodeState.ShuttingDown: {
            Writer.Close();
            break;
        }
    }
}
public virtual void Handle(SystemMessage.StateChangeMessage message) {
    _vnodeState = message.State;

    switch (message.State) {
        case VNodeState.Master: {
            _indexWriter.Reset();
            EpochManager.WriteNewEpoch(); // forces flush
            break;
        }
        case VNodeState.ShuttingDown: {
            Writer.Close();
            break;
        }
    }
}
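// Note: the two Handle methods above are variants of the same state-change handler. The first
// reacts to VNodeState.Leader and passes the epoch number taken from the
// SystemMessage.BecomeLeader message into WriteNewEpoch; the second is the older form that
// reacts to VNodeState.Master and calls WriteNewEpoch() with no arguments. The difference
// presumably reflects the Master-to-Leader renaming and a later change to the EpochManager call.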
public void a_record_can_be_written() {
    _checkpoint = new InMemoryCheckpoint(0);
    var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint()));
    db.Open();

    var tf = new TFChunkWriter(db);
    tf.Open();

    var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
    var streamId = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
    var eventTypeId = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;

    var record = LogRecord.Prepare(
        factory: recordFactory, logPosition: 0,
        correlationId: _correlationId, eventId: _eventId,
        transactionPos: 0, transactionOffset: 0, eventStreamId: streamId,
        expectedVersion: 1234, timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.None,
        eventType: eventTypeId, data: new byte[] { 1, 2, 3, 4, 5 }, metadata: new byte[] { 7, 17 });
    long tmp;
    tf.Write(record, out tmp);
    tf.Close();
    db.Dispose();

    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), _checkpoint.Read());

    using (var filestream = File.Open(GetFilePathFor("chunk-000000.000000"), FileMode.Open, FileAccess.Read)) {
        filestream.Position = ChunkHeader.Size;
        var reader = new BinaryReader(filestream);
        reader.ReadInt32();
        var read = LogRecord.ReadFrom(reader, (int)reader.BaseStream.Length);
        Assert.AreEqual(record, read);
    }
}
private static string CreateExistingDatabase(Action<ICheckpoint, TFChunkWriter> runScenario) {
    var databasePath = $"{nameof(StreamRevisionGreaterThanIntMaxValueFixture)}-{Guid.NewGuid():n}";

    var bus = new InMemoryBus("bus");
    var ioDispatcher = new IODispatcher(bus, new PublishEnvelope(bus));

    if (!Directory.Exists(databasePath)) {
        Directory.CreateDirectory(databasePath);
    }

    var writerCheckFilename = Path.Combine(databasePath, Checkpoint.Writer + ".chk");
    var chaserCheckFilename = Path.Combine(databasePath, Checkpoint.Chaser + ".chk");
    var writerCheckpoint = Runtime.IsMono
        ? (ICheckpoint)new FileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true)
        : new MemoryMappedFileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
    var chaserCheckpoint = Runtime.IsMono
        ? (ICheckpoint)new FileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true)
        : new MemoryMappedFileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);

    var db = new TFChunkDb(CreateDbConfig(databasePath, writerCheckpoint, chaserCheckpoint, TFConsts.ChunkSize));
    db.Open(); // create DB

    var writer = new TFChunkWriter(db);
    writer.Open();
    runScenario(writerCheckpoint, writer);
    writer.Close();

    writerCheckpoint.Flush();
    chaserCheckpoint.Write(writerCheckpoint.Read());
    chaserCheckpoint.Flush();
    db.Close();

    return databasePath;
}
public override async Task TestFixtureSetUp() {
    await base.TestFixtureSetUp();
    var indexDirectory = GetFilePathFor("index");
    _logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormatFactory.Create(new() {
        IndexDirectory = indexDirectory,
    });

    var writerCheckpoint = new InMemoryCheckpoint(0);
    var chaserCheckpoint = new InMemoryCheckpoint(0);
    var bus = new InMemoryBus("bus");
    new IODispatcher(bus, new PublishEnvelope(bus));

    _db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerCheckpoint, chaserCheckpoint));
    _db.Open(); // create db

    Writer = new TFChunkWriter(_db);
    Writer.Open();
    SetupDB();
    Writer.Close();
    Writer = null;

    writerCheckpoint.Flush();
    chaserCheckpoint.Write(writerCheckpoint.Read());
    chaserCheckpoint.Flush();

    var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 5,
        () => new TFChunkReader(_db, _db.Config.WriterCheckpoint));
    var lowHasher = _logFormat.LowHasher;
    var highHasher = _logFormat.HighHasher;
    var emptyStreamId = _logFormat.EmptyStreamId;
    _tableIndex = new TableIndex<TStreamId>(indexDirectory, lowHasher, highHasher, emptyStreamId,
        () => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2),
        () => new TFReaderLease(readers),
        IndexBitnessVersion, int.MaxValue, Constants.PTableMaxReaderCountDefault, MaxEntriesInMemTable);
    _logFormat.StreamNamesProvider.SetTableIndex(_tableIndex);

    var readIndex = new ReadIndex<TStreamId>(new NoopPublisher(), readers, _tableIndex,
        _logFormat.StreamNameIndexConfirmer, _logFormat.StreamIds, _logFormat.StreamNamesProvider,
        _logFormat.EmptyStreamId, _logFormat.StreamIdValidator, _logFormat.StreamIdSizer,
        _logFormat.StreamExistenceFilter, _logFormat.StreamExistenceFilterReader,
        _logFormat.EventTypeIndexConfirmer,
        streamInfoCacheCapacity: 100_000,
        additionalCommitChecks: PerformAdditionalCommitChecks,
        metastreamMaxCount: MetastreamMaxCount,
        hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
        skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault,
        replicationCheckpoint: _db.Config.ReplicationCheckpoint,
        indexCheckpoint: _db.Config.IndexCheckpoint);
    readIndex.IndexCommitter.Init(chaserCheckpoint.Read());
    ReadIndex = readIndex;

    _tableIndex.Close(false);

    Writer = new TFChunkWriter(_db);
    Writer.Open();
    Given();
    Writer.Close();
    Writer = null;

    writerCheckpoint.Flush();
    chaserCheckpoint.Write(writerCheckpoint.Read());
    chaserCheckpoint.Flush();

    _tableIndex = new TableIndex<TStreamId>(indexDirectory, lowHasher, highHasher, emptyStreamId,
        () => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2),
        () => new TFReaderLease(readers),
        IndexBitnessVersion, int.MaxValue, Constants.PTableMaxReaderCountDefault, MaxEntriesInMemTable);
    readIndex = new ReadIndex<TStreamId>(new NoopPublisher(), readers, _tableIndex,
        _logFormat.StreamNameIndexConfirmer, _logFormat.StreamIds, _logFormat.StreamNamesProvider,
        _logFormat.EmptyStreamId, _logFormat.StreamIdValidator, _logFormat.StreamIdSizer,
        _logFormat.StreamExistenceFilter, _logFormat.StreamExistenceFilterReader,
        _logFormat.EventTypeIndexConfirmer,
        streamInfoCacheCapacity: 100_000,
        additionalCommitChecks: PerformAdditionalCommitChecks,
        metastreamMaxCount: MetastreamMaxCount,
        hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
        skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault,
        replicationCheckpoint: _db.Config.ReplicationCheckpoint,
        indexCheckpoint: _db.Config.IndexCheckpoint);
    readIndex.IndexCommitter.Init(chaserCheckpoint.Read());
    ReadIndex = readIndex;
}
public override void TestFixtureSetUp() {
    base.TestFixtureSetUp();
    WriterCheckpoint = new InMemoryCheckpoint(0);
    ChaserCheckpoint = new InMemoryCheckpoint(0);
    Bus = new InMemoryBus("bus");
    IODispatcher = new IODispatcher(Bus, new PublishEnvelope(Bus));
    Db = new TFChunkDb(new TFChunkDbConfig(
        PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000, 0, WriterCheckpoint, ChaserCheckpoint,
        new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1)));
    Db.Open(); // create db

    Writer = new TFChunkWriter(Db);
    Writer.Open();
    WriteTestScenario();
    Writer.Close();
    Writer = null;

    WriterCheckpoint.Flush();
    ChaserCheckpoint.Write(WriterCheckpoint.Read());
    ChaserCheckpoint.Flush();

    var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 5,
        () => new TFChunkReader(Db, Db.Config.WriterCheckpoint));
    var lowHasher = new XXHashUnsafe();
    var highHasher = new Murmur3AUnsafe();
    TableIndex = new TableIndex(GetFilePathFor("index"), lowHasher, highHasher,
        () => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2),
        () => new TFReaderLease(readers),
        IndexBitnessVersion, MaxEntriesInMemTable);
    ReadIndex = new ReadIndex(new NoopPublisher(), readers, TableIndex, 0,
        additionalCommitChecks: PerformAdditionalCommitChecks,
        metastreamMaxCount: MetastreamMaxCount,
        hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault);
    ReadIndex.Init(ChaserCheckpoint.Read());

    // scavenge must run after readIndex is built
    if (_scavenge) {
        if (_completeLastChunkOnScavenge) {
            Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete();
        }
        _scavenger = new TFChunkScavenger(Db, IODispatcher, TableIndex, ReadIndex, Guid.NewGuid(), "fakeNodeIp");
        _scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks);
    }
}
public override void TestFixtureSetUp() {
    base.TestFixtureSetUp();
    var writerCheckpoint = new InMemoryCheckpoint(0);
    var chaserCheckpoint = new InMemoryCheckpoint(0);
    var bus = new InMemoryBus("bus");
    new IODispatcher(bus, new PublishEnvelope(bus));

    _db = new TFChunkDb(new TFChunkDbConfig(
        PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000, 0, writerCheckpoint, chaserCheckpoint,
        new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1)));
    _db.Open(); // create db

    Writer = new TFChunkWriter(_db);
    Writer.Open();
    SetupDB();
    Writer.Close();
    Writer = null;

    writerCheckpoint.Flush();
    chaserCheckpoint.Write(writerCheckpoint.Read());
    chaserCheckpoint.Flush();

    var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 5,
        () => new TFChunkReader(_db, _db.Config.WriterCheckpoint));
    var lowHasher = new XXHashUnsafe();
    var highHasher = new Murmur3AUnsafe();
    _tableIndex = new TableIndex(GetFilePathFor("index"), lowHasher, highHasher,
        () => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2),
        () => new TFReaderLease(readers),
        IndexBitnessVersion, MaxEntriesInMemTable);
    ReadIndex = new ReadIndex(new NoopPublisher(), readers, _tableIndex,
        EventStore.Core.Settings.ESConsts.StreamInfoCacheCapacity,
        additionalCommitChecks: PerformAdditionalCommitChecks,
        metastreamMaxCount: MetastreamMaxCount,
        hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
        skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault);
    ReadIndex.Init(chaserCheckpoint.Read());

    _tableIndex.Close(false);

    Writer = new TFChunkWriter(_db);
    Writer.Open();
    Given();
    Writer.Close();
    Writer = null;

    writerCheckpoint.Flush();
    chaserCheckpoint.Write(writerCheckpoint.Read());
    chaserCheckpoint.Flush();

    _tableIndex = new TableIndex(GetFilePathFor("index"), lowHasher, highHasher,
        () => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2),
        () => new TFReaderLease(readers),
        IndexBitnessVersion, MaxEntriesInMemTable);
    ReadIndex = new ReadIndex(new NoopPublisher(), readers, _tableIndex,
        EventStore.Core.Settings.ESConsts.StreamInfoCacheCapacity,
        additionalCommitChecks: PerformAdditionalCommitChecks,
        metastreamMaxCount: MetastreamMaxCount,
        hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
        skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault);
    ReadIndex.Init(chaserCheckpoint.Read());
}
public void a_record_is_not_written_at_first_but_written_on_second_try() {
    var filename1 = GetFilePathFor("chunk-000000.000000");
    var filename2 = GetFilePathFor("chunk-000001.000000");
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid());
    var chunkBytes = chunkHeader.AsByteArray();
    var bytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
    Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
    File.WriteAllBytes(filename1, bytes);

    _checkpoint = new InMemoryCheckpoint(0);
    var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint()));
    db.Open();

    var tf = new TFChunkWriter(db);
    long pos;

    var record1 = new PrepareLogRecord(
        logPosition: 0, correlationId: _correlationId, eventId: _eventId,
        expectedVersion: 1234, transactionPosition: 0, transactionOffset: 0,
        eventStreamId: "WorldEnding", timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.None,
        eventType: "type", data: new byte[] { 1, 2, 3, 4, 5 }, metadata: new byte[8000]);
    Assert.IsTrue(tf.Write(record1, out pos)); // almost fill up first chunk

    var record2 = new PrepareLogRecord(
        logPosition: pos, correlationId: _correlationId, eventId: _eventId,
        expectedVersion: 1234, transactionPosition: pos, transactionOffset: 0,
        eventStreamId: "WorldEnding", timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.None,
        eventType: "type", data: new byte[] { 1, 2, 3, 4, 5 }, metadata: new byte[8000]);
    Assert.IsFalse(tf.Write(record2, out pos)); // chunk has too small space

    var record3 = new PrepareLogRecord(
        logPosition: pos, correlationId: _correlationId, eventId: _eventId,
        expectedVersion: 1234, transactionPosition: pos, transactionOffset: 0,
        eventStreamId: "WorldEnding", timeStamp: new DateTime(2012, 12, 21), flags: PrepareFlags.None,
        eventType: "type", data: new byte[] { 1, 2, 3, 4, 5 }, metadata: new byte[2000]);
    Assert.IsTrue(tf.Write(record3, out pos));
    tf.Close();
    db.Dispose();

    Assert.AreEqual(record3.GetSizeWithLengthPrefixAndSuffix() + 10000, _checkpoint.Read());

    using (var filestream = File.Open(filename2, FileMode.Open, FileAccess.Read)) {
        filestream.Seek(ChunkHeader.Size + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record3, read);
    }
}
public override async Task TestFixtureSetUp() {
    await base.TestFixtureSetUp();
    var indexDirectory = GetFilePathFor("index");
    _logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormatFactory.Create(new() {
        IndexDirectory = indexDirectory,
    });
    _recordFactory = _logFormat.RecordFactory;
    _streamNameIndex = _logFormat.StreamNameIndex;
    _eventTypeIndex = _logFormat.EventTypeIndex;

    WriterCheckpoint = new InMemoryCheckpoint(0);
    ChaserCheckpoint = new InMemoryCheckpoint(0);
    Db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, WriterCheckpoint, ChaserCheckpoint,
        replicationCheckpoint: new InMemoryCheckpoint(-1), chunkSize: _chunkSize));
    Db.Open(); // create db

    Writer = new TFChunkWriter(Db);
    Writer.Open();
    WriteTestScenario();
    Writer.Close();
    Writer = null;

    WriterCheckpoint.Flush();
    ChaserCheckpoint.Write(WriterCheckpoint.Read());
    ChaserCheckpoint.Flush();

    var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 5,
        () => new TFChunkReader(Db, Db.Config.WriterCheckpoint));
    var lowHasher = _logFormat.LowHasher;
    var highHasher = _logFormat.HighHasher;
    var emptyStreamId = _logFormat.EmptyStreamId;
    TableIndex = new TableIndex<TStreamId>(indexDirectory, lowHasher, highHasher, emptyStreamId,
        () => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2),
        () => new TFReaderLease(readers),
        IndexBitnessVersion, int.MaxValue, Constants.PTableMaxReaderCountDefault, MaxEntriesInMemTable);
    _logFormat.StreamNamesProvider.SetTableIndex(TableIndex);

    var readIndex = new ReadIndex<TStreamId>(new NoopPublisher(), readers, TableIndex,
        _logFormat.StreamNameIndexConfirmer, _logFormat.StreamIds, _logFormat.StreamNamesProvider,
        _logFormat.EmptyStreamId, _logFormat.StreamIdValidator, _logFormat.StreamIdSizer,
        _logFormat.StreamExistenceFilter, _logFormat.StreamExistenceFilterReader,
        _logFormat.EventTypeIndexConfirmer,
        streamInfoCacheCapacity: StreamInfoCacheCapacity,
        additionalCommitChecks: PerformAdditionalCommitChecks,
        metastreamMaxCount: MetastreamMaxCount,
        hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
        skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault,
        replicationCheckpoint: Db.Config.ReplicationCheckpoint,
        indexCheckpoint: Db.Config.IndexCheckpoint);
    readIndex.IndexCommitter.Init(ChaserCheckpoint.Read());
    ReadIndex = readIndex;

    // scavenge must run after readIndex is built
    if (_scavenge) {
        if (_completeLastChunkOnScavenge) {
            Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete();
        }
        _scavenger = new TFChunkScavenger<TStreamId>(Db, new FakeTFScavengerLog(), TableIndex, ReadIndex, _logFormat.Metastreams);
        await _scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks);
    }
}
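// Note: the fixture setups in this listing trace the same pattern through several API generations:
// the earliest build TableIndex/ReadIndex directly with a ByLengthHasher and call ReadIndex.Build(),
// the middle ones switch to an ObjectPool of TFChunkReaders with XXHashUnsafe/Murmur3AUnsafe hashers
// and ReadIndex.Init, and the latest (this one and the indexDirectory-based one above) use the generic
// TableIndex<TStreamId>/ReadIndex<TStreamId> wired from a LogFormatHelper-created log format, with an
// awaited TFChunkScavenger<TStreamId>.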