public void with_a_writer_checksum_of_nonzero_and_no_files_a_corrupted_database_exception_is_thrown() {
	var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 500));
	var exc = Assert.Throws<CorruptDatabaseException>(() => db.Open());
	Assert.IsInstanceOf<ChunkNotFoundException>(exc.InnerException);
	db.Dispose();
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();
	WriterCheckpoint = new InMemoryCheckpoint(0);
	ChaserCheckpoint = new InMemoryCheckpoint(0);
	Db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, WriterCheckpoint, ChaserCheckpoint,
		replicationCheckpoint: new InMemoryCheckpoint(-1)));
	Db.Open();

	// create db
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;
	WriterCheckpoint.Flush();
	ChaserCheckpoint.Write(WriterCheckpoint.Read());
	ChaserCheckpoint.Flush();

	var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 5,
		() => new TFChunkReader(Db, Db.Config.WriterCheckpoint));
	var lowHasher = new XXHashUnsafe();
	var highHasher = new Murmur3AUnsafe();
	TableIndex = new TableIndex(GetFilePathFor("index"), lowHasher, highHasher,
		() => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2),
		() => new TFReaderLease(readers),
		IndexBitnessVersion,
		int.MaxValue,
		Constants.PTableMaxReaderCountDefault,
		MaxEntriesInMemTable);
	ReadIndex = new ReadIndex(new NoopPublisher(), readers, TableIndex, 0,
		additionalCommitChecks: PerformAdditionalCommitChecks,
		metastreamMaxCount: MetastreamMaxCount,
		hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
		skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault,
		replicationCheckpoint: Db.Config.ReplicationCheckpoint);
	((ReadIndex)ReadIndex).IndexCommitter.Init(ChaserCheckpoint.Read());

	// scavenge must run after readIndex is built
	if (_scavenge) {
		if (_completeLastChunkOnScavenge) {
			Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete();
		}

		_scavenger = new TFChunkScavenger(Db, new FakeTFScavengerLog(), TableIndex, ReadIndex);
		await _scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks);
	}
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	WriterCheckpoint = new InMemoryCheckpoint(0);
	ChaserCheckpoint = new InMemoryCheckpoint(0);
	Db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		10000,
		0,
		WriterCheckpoint,
		ChaserCheckpoint,
		new InMemoryCheckpoint(-1),
		new InMemoryCheckpoint(-1)));
	Db.Open();

	// create db
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;
	WriterCheckpoint.Flush();
	ChaserCheckpoint.Write(WriterCheckpoint.Read());
	ChaserCheckpoint.Flush();

	var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 2,
		() => new TFChunkReader(Db, Db.Config.WriterCheckpoint));
	TableIndex = new TableIndex(GetFilePathFor("index"),
		() => new HashListMemTable(MaxEntriesInMemTable * 2),
		() => new TFReaderLease(readers),
		MaxEntriesInMemTable);
	var hasher = new ByLengthHasher();
	ReadIndex = new ReadIndex(new NoopPublisher(), readers, TableIndex, hasher, 0,
		additionalCommitChecks: true,
		metastreamMaxCount: MetastreamMaxCount);
	ReadIndex.Init(ChaserCheckpoint.Read());

	// scavenge must run after readIndex is built
	if (_scavenge) {
		if (_completeLastChunkOnScavenge) {
			Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete();
		}

		_scavenger = new TFChunkScavenger(Db, TableIndex, hasher, ReadIndex);
		_scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks);
	}
}
public void allows_with_exactly_enough_file_to_reach_checksum() {
	var config = TFChunkHelper.CreateDbConfig(PathName, 10000);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
		Assert.DoesNotThrow(() => db.Open(verifyHash: false));
	}
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	string dbPath = Path.Combine(PathName, string.Format("mini-node-db-{0}", Guid.NewGuid()));
	Bus = new InMemoryBus("bus");
	IODispatcher = new IODispatcher(Bus, new PublishEnvelope(Bus));

	if (!Directory.Exists(dbPath)) {
		Directory.CreateDirectory(dbPath);
	}

	var writerCheckFilename = Path.Combine(dbPath, Checkpoint.Writer + ".chk");
	var chaserCheckFilename = Path.Combine(dbPath, Checkpoint.Chaser + ".chk");
	if (Runtime.IsMono) {
		WriterCheckpoint = new FileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
		ChaserCheckpoint = new FileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);
	} else {
		WriterCheckpoint = new MemoryMappedFileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
		ChaserCheckpoint = new MemoryMappedFileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);
	}

	Db = new TFChunkDb(new TFChunkDbConfig(dbPath,
		new VersionedPatternFileNamingStrategy(dbPath, "chunk-"),
		TFConsts.ChunkSize,
		0,
		WriterCheckpoint,
		ChaserCheckpoint,
		new InMemoryCheckpoint(-1),
		new InMemoryCheckpoint(-1),
		inMemDb: false));
	Db.Open();

	// create DB
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;
	WriterCheckpoint.Flush();
	ChaserCheckpoint.Write(WriterCheckpoint.Read());
	ChaserCheckpoint.Flush();
	Db.Close();

	// start node with our created DB
	Node = new MiniNode(PathName, inMemDb: false, dbPath: dbPath);
	Node.Start();

	Given();
}
public void allows_no_files_when_checkpoint_is_zero() {
	var config = TFChunkHelper.CreateDbConfig(PathName, 0);
	using (var db = new TFChunkDb(config)) {
		Assert.DoesNotThrow(() => db.Open(verifyHash: false));
		Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
	}
}
public void allows_first_correct_ongoing_chunk_when_checkpoint_is_zero() {
	var config = TFChunkHelper.CreateDbConfig(PathName, 0);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateOngoingChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
		Assert.DoesNotThrow(() => db.Open(verifyHash: false));
	}
}
public void allows_last_chunk_to_be_not_completed_when_checksum_is_exactly_in_between_two_chunks_and_no_next_chunk_exists() {
	var config = TFChunkHelper.CreateDbConfig(PathName, 10000);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateOngoingChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
		Assert.DoesNotThrow(() => db.Open(verifyHash: false));
	}
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();
	string dbPath = Path.Combine(PathName, string.Format("mini-node-db-{0}", Guid.NewGuid()));

	_logFormatFactory = LogFormatHelper<TLogFormat, TStreamId>.LogFormatFactory.Create(new() {
		IndexDirectory = GetFilePathFor("index"),
	});

	Bus = new InMemoryBus("bus");
	IODispatcher = new IODispatcher(Bus, new PublishEnvelope(Bus));

	if (!Directory.Exists(dbPath)) {
		Directory.CreateDirectory(dbPath);
	}

	var writerCheckFilename = Path.Combine(dbPath, Checkpoint.Writer + ".chk");
	var chaserCheckFilename = Path.Combine(dbPath, Checkpoint.Chaser + ".chk");
	WriterCheckpoint = new MemoryMappedFileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
	ChaserCheckpoint = new MemoryMappedFileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);

	Db = new TFChunkDb(TFChunkHelper.CreateDbConfig(dbPath, WriterCheckpoint, ChaserCheckpoint, TFConsts.ChunkSize));
	Db.Open();

	// create DB
	Writer = new TFChunkWriter(Db);
	Writer.Open();

	var pm = _logFormatFactory.CreatePartitionManager(
		reader: new TFChunkReader(Db, WriterCheckpoint),
		writer: Writer);
	pm.Initialize();

	WriteTestScenario();
	Writer.Close();
	Writer = null;
	WriterCheckpoint.Flush();
	ChaserCheckpoint.Write(WriterCheckpoint.Read());
	ChaserCheckpoint.Flush();
	Db.Close();

	// start node with our created DB
	Node = new MiniNode<TLogFormat, TStreamId>(PathName, inMemDb: false, dbPath: dbPath);
	await Node.Start();

	try {
		await Given().WithTimeout();
	} catch (Exception ex) {
		throw new Exception("Given Failed", ex);
	}
}
public void detect_no_database() {
	var config = TFChunkHelper.CreateSizedDbConfig(PathName, 4000, chunkSize: 1000);
	using (var db = new TFChunkDb(config)) {
		Assert.That(() => db.Open(verifyHash: false),
			Throws.Exception.InstanceOf<CorruptDatabaseException>()
				.With.InnerException.InstanceOf<ChunkNotFoundException>());
	}
}
public void when_an_epoch_checksum_is_ahead_of_writer_checksum_throws_corrupt_database_exception() {
	var config = TFChunkHelper.CreateDbConfig(PathName, 0, 0, 11);
	using (var db = new TFChunkDb(config)) {
		Assert.That(() => db.Open(verifyHash: false),
			Throws.Exception.InstanceOf<CorruptDatabaseException>()
				.With.InnerException.InstanceOf<ReaderCheckpointHigherThanWriterException>());
	}
}
public void allows_next_new_chunk_when_checksum_is_exactly_in_between_two_chunks() {
	var config = TFChunkHelper.CreateDbConfig(PathName, 10000);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
		DbUtil.CreateOngoingChunk(config, 1, GetFilePathFor("chunk-000001.000000"));
		Assert.DoesNotThrow(() => db.Open(verifyHash: false));
	}
}
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); _db = new TFChunkDb(new TFChunkDbConfig(PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"), 16 * 1024, 0, new InMemoryCheckpoint(), new InMemoryCheckpoint(), new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1))); _db.Open(); var chunk = _db.Manager.GetChunkFor(0); _p1 = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1", new byte[] { 0, 1, 2 }, new byte[] { 5, 7 }); _res1 = chunk.TryAppend(_p1); _c1 = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0); _cres1 = chunk.TryAppend(_c1); _p2 = LogRecord.SingleWrite(_cres1.NewPosition, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1", new byte[] { 0, 1, 2 }, new byte[] { 5, 7 }); _res2 = chunk.TryAppend(_p2); _c2 = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1); _cres2 = chunk.TryAppend(_c2); _p3 = LogRecord.SingleWrite(_cres2.NewPosition, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1", new byte[] { 0, 1, 2 }, new byte[] { 5, 7 }); _res3 = chunk.TryAppend(_p3); _c3 = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2); _cres3 = chunk.TryAppend(_c3); chunk.Complete(); _db.Config.WriterCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition); _db.Config.WriterCheckpoint.Flush(); _db.Config.ChaserCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition); _db.Config.ChaserCheckpoint.Flush(); var bus = new InMemoryBus("Bus"); var ioDispatcher = new IODispatcher(bus, new PublishEnvelope(bus)); var scavenger = new TFChunkScavenger(_db, ioDispatcher, new FakeTableIndex(), new FakeReadIndex(x => x == "es-to-scavenge"), Guid.NewGuid(), "fakeNodeIp"); scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false); _scavengedChunk = _db.Manager.GetChunk(0); }
public void a_record_can_be_written() {
	var filename = GetFilePathFor("chunk-000000.000000");
	var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid());
	var chunkBytes = chunkHeader.AsByteArray();
	var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
	Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
	File.WriteAllBytes(filename, buf);

	_checkpoint = new InMemoryCheckpoint(137);
	var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint(),
		chunkSize: chunkHeader.ChunkSize));
	db.Open();

	var bytes = new byte[3994]; // this gives exactly 4097 size of record, with 3993 (rec size 4096) everything works fine!
	new Random().NextBytes(bytes);

	var writer = new TFChunkWriter(db);
	var logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormat;
	logFormat.StreamNameIndex.GetOrAddId("WorldEnding", out var streamId, out _, out _);
	var record = LogRecord.Prepare(
		factory: logFormat.RecordFactory,
		logPosition: 137,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPos: 789,
		transactionOffset: 543,
		eventStreamId: streamId,
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.SingleWrite,
		eventType: "type",
		data: bytes,
		metadata: new byte[] { 0x07, 0x17 });

	long pos;
	Assert.IsTrue(writer.Write(record, out pos));
	writer.Close();
	db.Dispose();

	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());
	using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
		filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
		var reader = new BinaryReader(filestream);
		var read = LogRecord.ReadFrom(reader, (int)reader.BaseStream.Length);
		Assert.AreEqual(record, read);
	}
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();
	_mainBus = new InMemoryBus(nameof(when_having_an_epoch_manager_and_empty_tf_log));
	_mainBus.Subscribe(new AdHocHandler<SystemMessage.EpochWritten>(m => _published.Add(m)));
	_db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0));
	_db.Open();
	_reader = new TFChunkReader(_db, _db.Config.WriterCheckpoint);
	_writer = new TFChunkWriter(_db);
}
public void does_not_allow_first_completed_chunk_when_checkpoint_is_zero() {
	var config = TFChunkHelper.CreateDbConfig(PathName, 0);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
		Assert.That(() => db.Open(verifyHash: false),
			Throws.Exception.InstanceOf<CorruptDatabaseException>()
				.With.InnerException.InstanceOf<BadChunkInDatabaseException>());
	}
}
public void with_file_of_wrong_size_database_corruption_is_detected() {
	var config = TFChunkHelper.CreateDbConfig(PathName, 500);
	using (var db = new TFChunkDb(config)) {
		File.WriteAllText(GetFilePathFor("chunk-000000.000000"), "this is just some test blahbydy blah");
		Assert.That(() => db.Open(verifyHash: false),
			Throws.Exception.InstanceOf<CorruptDatabaseException>()
				.With.InnerException.InstanceOf<BadChunkInDatabaseException>());
	}
}
public void when_in_brand_new_extraneous_files_throws_corrupt_database_exception() {
	var config = TFChunkHelper.CreateDbConfig(PathName, 0);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateSingleChunk(config, 4, GetFilePathFor("chunk-000004.000000"));
		Assert.That(() => db.Open(verifyHash: false),
			Throws.Exception.InstanceOf<CorruptDatabaseException>()
				.With.InnerException.InstanceOf<ExtraneousFileFoundException>());
	}
}
public override void TestFixtureTearDown() {
	using (var db = new TFChunkDb(_config)) {
		Assert.DoesNotThrow(() => db.Open(verifyHash: false));
	}

	Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
	Assert.AreEqual(1, Directory.GetFiles(PathName, "*").Length);
	base.TestFixtureTearDown();
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();
	string dbPath = Path.Combine(PathName, string.Format("mini-node-db-{0}", Guid.NewGuid()));
	Bus = new InMemoryBus("bus");
	IODispatcher = new IODispatcher(Bus, new PublishEnvelope(Bus));

	if (!Directory.Exists(dbPath)) {
		Directory.CreateDirectory(dbPath);
	}

	var writerCheckFilename = Path.Combine(dbPath, Checkpoint.Writer + ".chk");
	var chaserCheckFilename = Path.Combine(dbPath, Checkpoint.Chaser + ".chk");
	if (Runtime.IsMono) {
		WriterCheckpoint = new FileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
		ChaserCheckpoint = new FileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);
	} else {
		WriterCheckpoint = new MemoryMappedFileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
		ChaserCheckpoint = new MemoryMappedFileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);
	}

	Db = new TFChunkDb(TFChunkHelper.CreateDbConfig(dbPath, WriterCheckpoint, ChaserCheckpoint, TFConsts.ChunkSize));
	Db.Open();

	// create DB
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;
	WriterCheckpoint.Flush();
	ChaserCheckpoint.Write(WriterCheckpoint.Read());
	ChaserCheckpoint.Flush();
	Db.Close();

	// start node with our created DB
	Node = new MiniNode(PathName, inMemDb: false, dbPath: dbPath);
	await Node.Start();

	try {
		await Given().WithTimeout();
	} catch (Exception ex) {
		throw new Exception("Given Failed", ex);
	}
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();
	_mainBus = new InMemoryBus(nameof(when_starting_having_TFLog_with_no_epochs<TLogFormat, TStreamId>));
	_mainBus.Subscribe(new AdHocHandler<SystemMessage.EpochWritten>(m => _published.Add(m)));
	_db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0));
	_db.Open();
	_reader = new TFChunkReader(_db, _db.Config.WriterCheckpoint);
	_writer = new TFChunkWriter(_db);
}
public void with_checksum_inside_multi_chunk_throws() {
	var config = TFChunkHelper.CreateDbConfig(PathName, 25000);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateMultiChunk(config, 0, 2, GetFilePathFor("chunk-000000.000000"));
		Assert.That(() => db.Open(verifyHash: false),
			Throws.Exception.InstanceOf<CorruptDatabaseException>()
				.With.InnerException.InstanceOf<ChunkNotFoundException>());
	}
}
public void when_in_multiple_missing_file_throws_corrupt_database_exception() {
	var config = TFChunkHelper.CreateDbConfig(PathName, 25000);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
		DbUtil.CreateOngoingChunk(config, 2, GetFilePathFor("chunk-000002.000000"));
		Assert.That(() => db.Open(verifyHash: false),
			Throws.Exception.InstanceOf<CorruptDatabaseException>()
				.With.InnerException.InstanceOf<ChunkNotFoundException>());
	}
}
public void when_prelast_chunk_corrupted_throw_hash_validation_exception() {
	var config = TFChunkHelper.CreateDbConfig(PathName, 15000);
	using (var db = new TFChunkDb(config)) {
		byte[] contents = new byte[config.ChunkSize];
		for (var i = 0; i < config.ChunkSize; i++) {
			contents[i] = 0;
		}

		// create a completed chunk and an ongoing chunk
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"),
			actualDataSize: config.ChunkSize, contents: contents);
		DbUtil.CreateOngoingChunk(config, 1, GetFilePathFor("chunk-000001.000000"));

		// corrupt the pre-last (completed) chunk by modifying some bytes of its content
		using (Stream stream = File.Open(GetFilePathFor("chunk-000000.000000"), FileMode.Open)) {
			var data = new byte[3];
			data[0] = 1;
			data[1] = 2;
			data[2] = 3;
			stream.Position = ChunkHeader.Size + 15; // arbitrary choice of position to modify
			stream.Write(data, 0, data.Length);
		}

		// the exception is thrown on another thread, so capture console output to check for it
		var output = "";
		using (StringWriter sw = new StringWriter()) {
			Console.SetOut(sw);
			db.Open(verifyHash: true);
			// arbitrary wait
			Thread.Sleep(2000);
			output = sw.ToString();
		}

		var standardOutput = new StreamWriter(Console.OpenStandardOutput());
		standardOutput.AutoFlush = true;
		Console.SetOut(standardOutput);
		Console.WriteLine(output);
		Assert.IsTrue(output.Contains("EXCEPTION OCCURRED"));
		Assert.IsTrue(output.Contains("EventStore.Core.Exceptions.HashValidationException"));
	}
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	_db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		4096,
		0,
		new InMemoryCheckpoint(),
		new InMemoryCheckpoint(),
		new InMemoryCheckpoint(-1),
		new InMemoryCheckpoint(-1)));
	_db.Open();

	var chunk = _db.Manager.GetChunk(0);
	_records = new LogRecord[RecordsCount];
	_results = new RecordWriteResult[RecordsCount];

	for (int i = 0; i < _records.Length - 1; ++i) {
		_records[i] = LogRecord.SingleWrite(i == 0 ? 0 : _results[i - 1].NewPosition,
			Guid.NewGuid(), Guid.NewGuid(), "es1", ExpectedVersion.Any, "et1",
			new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
		_results[i] = chunk.TryAppend(_records[i]);
	}

	_records[_records.Length - 1] = LogRecord.Prepare(
		_results[_records.Length - 1 - 1].NewPosition,
		Guid.NewGuid(), Guid.NewGuid(),
		_results[_records.Length - 1 - 1].NewPosition,
		0, "es1", ExpectedVersion.Any, PrepareFlags.Data, "et1",
		new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
	_results[_records.Length - 1] = chunk.TryAppend(_records[_records.Length - 1]);

	chunk.Flush();
	_db.Config.WriterCheckpoint.Write(_results[RecordsCount - 1].NewPosition);
	_db.Config.WriterCheckpoint.Flush();
}
public void when_prelast_chunk_corrupted_throw_hash_validation_exception() {
	var config = TFChunkHelper.CreateDbConfig(PathName, 15000);
	var sink = new TestLogEventSink();
	using (var log = new LoggerConfiguration()
		.WriteTo.Sink(sink)
		.MinimumLevel.Verbose()
		.CreateLogger())
	using (var db = new TFChunkDb(config, log)) {
		byte[] contents = new byte[config.ChunkSize];
		for (var i = 0; i < config.ChunkSize; i++) {
			contents[i] = 0;
		}

		// create a completed chunk and an ongoing chunk
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"),
			actualDataSize: config.ChunkSize, contents: contents);
		DbUtil.CreateOngoingChunk(config, 1, GetFilePathFor("chunk-000001.000000"));

		// corrupt the pre-last (completed) chunk by modifying some bytes of its content
		using (Stream stream = File.Open(GetFilePathFor("chunk-000000.000000"), FileMode.Open)) {
			var data = new byte[3];
			data[0] = 1;
			data[1] = 2;
			data[2] = 3;
			stream.Position = ChunkHeader.Size + 15; // arbitrary choice of position to modify
			stream.Write(data, 0, data.Length);
		}

		// the exception is thrown on another thread, so inspect the log output to check for it
		db.Open(verifyHash: true);
		// arbitrary wait
		Thread.Sleep(2000);
	}

	var thrownException = sink.LogEventReceived.WithTimeout().Result;
	Assert.IsInstanceOf<HashValidationException>(thrownException);
	var output = sink.Output;
	Assert.AreEqual(@"Verification of chunk ""#0-0 (chunk-000000.000000)"" failed, terminating server...", output);
}
public void try_read_returns_record_when_writerchecksum_ahead() {
	var recordToWrite = new PrepareLogRecord(logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPosition: 0,
		transactionOffset: 0,
		eventStreamId: "WorldEnding",
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[] { 7, 17 });
	using (var fs = new FileStream(GetFilePathFor("chunk-000000.000000"), FileMode.CreateNew, FileAccess.Write)) {
		fs.SetLength(ChunkHeader.Size + ChunkFooter.Size + 10000);
		var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid()).AsByteArray();
		var writer = new BinaryWriter(fs);
		writer.Write(chunkHeader);
		recordToWrite.WriteWithLengthPrefixAndSuffixTo(writer);
		fs.Close();
	}

	var writerchk = new InMemoryCheckpoint(128);
	var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		10000,
		0,
		writerchk,
		chaserchk,
		new InMemoryCheckpoint(-1),
		new InMemoryCheckpoint(-1)));
	db.Open();

	var chaser = new TFChunkChaser(db, writerchk, chaserchk);
	chaser.Open();

	LogRecord record;
	var recordRead = chaser.TryReadNext(out record);
	chaser.Close();

	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
	Assert.IsTrue(recordRead);
	Assert.AreEqual(recordToWrite, record);

	db.Close();
}
public void try_read_returns_record_when_record_bigger_than_internal_buffer() {
	var writerchk = new InMemoryCheckpoint(0);
	var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		10000,
		0,
		writerchk,
		chaserchk,
		new InMemoryCheckpoint(-1),
		new InMemoryCheckpoint(-1)));
	db.Open();

	var recordToWrite = new PrepareLogRecord(logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPosition: 0,
		transactionOffset: 0,
		eventStreamId: "WorldEnding",
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[9000],
		metadata: new byte[] { 7, 17 });
	var writer = new TFChunkWriter(db);
	writer.Open();
	long pos;
	Assert.IsTrue(writer.Write(recordToWrite, out pos));
	writer.Close();

	writerchk.Write(recordToWrite.GetSizeWithLengthPrefixAndSuffix());

	var reader = new TFChunkChaser(db, writerchk, chaserchk);
	reader.Open();

	LogRecord record;
	var readRecord = reader.TryReadNext(out record);
	reader.Close();

	Assert.IsTrue(readRecord);
	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
	Assert.AreEqual(recordToWrite, record);

	db.Close();
}
public void does_not_allow_next_new_completed_chunk_when_checksum_is_exactly_in_between_two_chunks() {
	var config = TFChunkHelper.CreateDbConfig(PathName, 10000);
	using (var db = new TFChunkDb(config)) {
		DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
		DbUtil.CreateSingleChunk(config, 1, GetFilePathFor("chunk-000001.000000"));
		Assert.That(() => db.Open(verifyHash: false),
			Throws.Exception.InstanceOf<CorruptDatabaseException>()
				.With.InnerException.InstanceOf<BadChunkInDatabaseException>());
	}
}
public void try_read_returns_record_when_writerchecksum_ahead() {
	var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
	var streamId = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
	var eventTypeId = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;

	var recordToWrite = LogRecord.Prepare(
		factory: recordFactory,
		logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPos: 0,
		transactionOffset: 0,
		eventStreamId: streamId,
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: eventTypeId,
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[] { 7, 17 });
	using (var fs = new FileStream(GetFilePathFor("chunk-000000.000000"), FileMode.CreateNew, FileAccess.Write)) {
		fs.SetLength(ChunkHeader.Size + ChunkFooter.Size + 10000);
		var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid())
			.AsByteArray();
		var writer = new BinaryWriter(fs);
		writer.Write(chunkHeader);
		recordToWrite.WriteWithLengthPrefixAndSuffixTo(writer);
		fs.Close();
	}

	var writerchk = new InMemoryCheckpoint(recordToWrite.GetSizeWithLengthPrefixAndSuffix() + 16);
	var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
	var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));
	db.Open();

	var chaser = new TFChunkChaser(db, writerchk, chaserchk, false);
	chaser.Open();

	ILogRecord record;
	var recordRead = chaser.TryReadNext(out record);
	chaser.Close();

	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
	Assert.IsTrue(recordRead);
	Assert.AreEqual(recordToWrite, record);

	db.Close();
}