Example #1
        public void does_not_allow_pre_last_chunk_to_be_not_completed_when_checksum_is_exactly_in_between_two_chunks_and_next_chunk_exists()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 10000);

            using (var db = new TFChunkDb(config)) {
                DbUtil.CreateOngoingChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
                DbUtil.CreateOngoingChunk(config, 1, GetFilePathFor("chunk-000001.000000"));
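                // chunk #0 was never completed although chunk #1 already exists, so opening the database must fail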
                Assert.That(() => db.Open(verifyHash: false),
                            Throws.Exception.InstanceOf <CorruptDatabaseException>()
                            .With.InnerException.InstanceOf <BadChunkInDatabaseException>());
            }
        }
Example #2
        public void when_in_first_extraneous_files_throws_corrupt_database_exception()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 9000);

            using (var db = new TFChunkDb(config)) {
                DbUtil.CreateOngoingChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
                DbUtil.CreateSingleChunk(config, 1, GetFilePathFor("chunk-000001.000000"));
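                // the checkpoint is still inside the first chunk, so the completed chunk #1 is an extraneous file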
                Assert.That(() => db.Open(verifyHash: false),
                            Throws.Exception.InstanceOf <CorruptDatabaseException>()
                            .With.InnerException.InstanceOf <ExtraneousFileFoundException>());
            }
        }
Example #3
        public void when_prelast_chunk_corrupted_throw_hash_validation_exception()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 15000);
            var sink   = new TestLogEventSink();

            using (var log = new LoggerConfiguration()
                             .WriteTo.Sink(sink)
                             .MinimumLevel.Verbose()
                             .CreateLogger())
                using (var db = new TFChunkDb(config, log)) {
                    byte[] contents = new byte[config.ChunkSize];
                    for (var i = 0; i < config.ChunkSize; i++)
                    {
                        contents[i] = 0;
                    }

                    /*
                     * Create a completed chunk and an ongoing chunk
                     */
                    DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"),
                                             actualDataSize: config.ChunkSize,
                                             contents: contents);
                    DbUtil.CreateOngoingChunk(config, 1, GetFilePathFor("chunk-000001.000000"));

                    /*
                     * Corrupt the pre-last (completed) chunk by overwriting a few bytes of its content
                     */
                    using (Stream stream = File.Open(GetFilePathFor("chunk-000000.000000"), FileMode.Open)) {
                        var data = new byte[3];
                        data[0]         = 1;
                        data[1]         = 2;
                        data[2]         = 3;
                        stream.Position = ChunkHeader.Size + 15;                 //arbitrary choice of position to modify
                        stream.Write(data, 0, data.Length);
                    }

                    /*
                     * The exception is thrown on another thread, so the captured log output is used to detect it
                     */
                    db.Open(verifyHash: true);
                    //arbitrary wait
                    Thread.Sleep(2000);
                }

            var thrownException = sink.LogEventReceived.WithTimeout().Result;

            Assert.IsInstanceOf <HashValidationException>(thrownException);

            var output = sink.Output;

            Assert.AreEqual(@"Verification of chunk ""#0-0 (chunk-000000.000000)"" failed, terminating server...",
                            output);
        }
        public void when_prelast_chunk_corrupted_throw_hash_validation_exception()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 15000);

            using (var db = new TFChunkDb(config))
            {
                byte[] contents = new byte[config.ChunkSize];
                for (var i = 0; i < config.ChunkSize; i++)
                {
                    contents[i] = 0;
                }

                /*
                 * Create a completed chunk and an ongoing chunk
                 */
                DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"),
                                         actualDataSize: config.ChunkSize,
                                         contents: contents);
                DbUtil.CreateOngoingChunk(config, 1, GetFilePathFor("chunk-000001.000000"));

                /*
                 * Corrupt the pre-last (completed) chunk by overwriting a few bytes of its content
                 */
                using (Stream stream = File.Open(GetFilePathFor("chunk-000000.000000"), FileMode.Open))
                {
                    var data = new byte[3];
                    data[0]         = 1;
                    data[1]         = 2;
                    data[2]         = 3;
                    stream.Position = ChunkHeader.Size + 15; //arbitrary choice of position to modify
                    stream.Write(data, 0, data.Length);
                }

                /*
                 * The exception is thrown on another thread, so the captured console output is used to detect it
                 */
                var output = "";
                using (StringWriter sw = new StringWriter())
                {
                    Console.SetOut(sw);
                    db.Open(verifyHash: true);
                    //arbitrary wait
                    Thread.Sleep(2000);
                    output = sw.ToString();
                }
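                // restore the standard console output and echo the captured text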
                var standardOutput = new StreamWriter(Console.OpenStandardOutput());
                standardOutput.AutoFlush = true;
                Console.SetOut(standardOutput);
                Console.WriteLine(output);
                Assert.IsTrue(output.Contains("EXCEPTION(S) OCCURRED:"));
                Assert.IsTrue(output.Contains("EventStore.Core.Exceptions.HashValidationException"));
            }
        }
        public void with_not_enough_files_to_reach_checksum_throws()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 15000);

            using (var db = new TFChunkDb(config))
            {
                DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
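                // the checkpoint lies beyond the data available on disk, so a required chunk is missing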
                Assert.That(() => db.Open(verifyHash: false),
                            Throws.Exception.InstanceOf <CorruptDatabaseException>()
                            .With.InnerException.InstanceOf <ChunkNotFoundException>());
            }
        }
        public void does_not_allow_first_completed_chunk_when_checkpoint_is_zero()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 0);

            using (var db = new TFChunkDb(config))
            {
                DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
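                // a completed chunk #0 is inconsistent with a writer checkpoint of 0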
                Assert.That(() => db.Open(verifyHash: false),
                            Throws.Exception.InstanceOf <CorruptDatabaseException>()
                            .With.InnerException.InstanceOf <BadChunkInDatabaseException>());
            }
        }
        public void with_file_of_wrong_size_database_corruption_is_detected()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 500);

            using (var db = new TFChunkDb(config))
            {
                File.WriteAllText(GetFilePathFor("chunk-000000.000000"), "this is just some test blahbydy blah");
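                // an arbitrary text file standing in for chunk #0 has the wrong size and must be flagged as a bad chunk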
                Assert.That(() => db.Open(verifyHash: false),
                            Throws.Exception.InstanceOf <CorruptDatabaseException>()
                            .With.InnerException.InstanceOf <BadChunkInDatabaseException>());
            }
        }
        public void with_wrong_size_file_less_than_checksum_throws()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 15000);

            using (var db = new TFChunkDb(config))
            {
                DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
                DbUtil.CreateSingleChunk(config, 1, GetFilePathFor("chunk-000001.000000"), actualDataSize: config.ChunkSize - 1000);
                Assert.That(() => db.Open(verifyHash: false),
                            Throws.Exception.InstanceOf <CorruptDatabaseException>()
                            .With.InnerException.InstanceOf <BadChunkInDatabaseException>());
            }
        }
Example #9
        public void does_not_allow_checkpoint_to_point_into_the_middle_of_completed_chunk_when_not_enough_actual_data()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 1500);

            using (var db = new TFChunkDb(config)) {
                DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
                DbUtil.CreateSingleChunk(config, 1, GetFilePathFor("chunk-000001.000001"), actualDataSize: 499);
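                // the checkpoint points into the middle of completed chunk #1, which does not hold enough actual data to cover it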

                Assert.That(() => db.Open(verifyHash: false),
                            Throws.Exception.InstanceOf <CorruptDatabaseException>()
                            .With.InnerException.InstanceOf <BadChunkInDatabaseException>());
            }
        }
        public void does_not_allow_checkpoint_to_point_into_the_middle_of_multichunk_chunk()
        {
            var config = TFChunkHelper.CreateSizedDbConfig(PathName, 1500, chunkSize: 1000);

            using (var db = new TFChunkDb(config)) {
                DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
                DbUtil.CreateMultiChunk(config, 1, 10, GetFilePathFor("chunk-000001.000001"));
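                // the checkpoint points into chunk #1, which is part of a multi-chunk file spanning chunks #1-#10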

                Assert.That(() => db.Open(verifyHash: false),
                            Throws.Exception.InstanceOf <CorruptDatabaseException>()
                            .With.InnerException.InstanceOf <BadChunkInDatabaseException>());
            }
        }
        public void CreateDb(params Rec[] records)
        {
            if (DbRes != null)
            {
                DbRes.Db.Close();
            }

            var dbConfig = TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 1024 * 1024);
            var dbHelper = new TFChunkDbCreationHelper <TLogFormat, TStreamId>(dbConfig);

            DbRes = dbHelper.Chunk(records).CreateDb();

            DbRes.Db.Config.WriterCheckpoint.Flush();
            DbRes.Db.Config.ChaserCheckpoint.Write(DbRes.Db.Config.WriterCheckpoint.Read());
            DbRes.Db.Config.ChaserCheckpoint.Flush();

            var logFormat = LogFormatHelper <TLogFormat, TStreamId> .LogFormat;
            var readers   = new ObjectPool <ITransactionFileReader>(
                "Readers", 2, 2, () => new TFChunkReader(DbRes.Db, DbRes.Db.Config.WriterCheckpoint));

            var lowHasher     = logFormat.LowHasher;
            var highHasher    = logFormat.HighHasher;
            var emptyStreamId = logFormat.EmptyStreamId;

            TableIndex = new TableIndex <TStreamId>(GetFilePathFor("index"), lowHasher, highHasher, emptyStreamId,
                                                    () => new HashListMemTable(PTableVersions.IndexV3, MaxEntriesInMemTable * 2),
                                                    () => new TFReaderLease(readers),
                                                    PTableVersions.IndexV3,
                                                    int.MaxValue,
                                                    Constants.PTableMaxReaderCountDefault,
                                                    MaxEntriesInMemTable);

            var readIndex = new ReadIndex <TStreamId>(new NoopPublisher(),
                                                      readers,
                                                      TableIndex,
                                                      logFormat.StreamIds,
                                                      logFormat.StreamNamesProvider,
                                                      logFormat.EmptyStreamId,
                                                      logFormat.StreamIdValidator,
                                                      logFormat.StreamIdSizer,
                                                      0,
                                                      additionalCommitChecks: true,
                                                      metastreamMaxCount: _metastreamMaxCount,
                                                      hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
                                                      skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault,
                                                      replicationCheckpoint: DbRes.Db.Config.ReplicationCheckpoint,
                                                      indexCheckpoint: DbRes.Db.Config.IndexCheckpoint);

            readIndex.IndexCommitter.Init(DbRes.Db.Config.ChaserCheckpoint.Read());
            ReadIndex = new TestReadIndex <TStreamId>(readIndex, logFormat.StreamNameIndex);
        }
Example #12
        public override async Task TestFixtureSetUp()
        {
            await base.TestFixtureSetUp();

            // writer checkpoint = 5500, truncate to 0, max truncation = 1000
            _config = TFChunkHelper.CreateDbConfig(PathName, 5500, 5500, 5500, 0, 1000, maxTruncation: 1000);

            DbUtil.CreateMultiChunk(_config, 0, 2, GetFilePathFor("chunk-000000.000001"));
            DbUtil.CreateMultiChunk(_config, 0, 2, GetFilePathFor("chunk-000000.000002"));
            DbUtil.CreateMultiChunk(_config, 3, 10, GetFilePathFor("chunk-000003.000001"));
            DbUtil.CreateMultiChunk(_config, 3, 10, GetFilePathFor("chunk-000003.000002"));
            DbUtil.CreateMultiChunk(_config, 7, 8, GetFilePathFor("chunk-000007.000001"));
            DbUtil.CreateOngoingChunk(_config, 11, GetFilePathFor("chunk-000011.000000"));
        }
Example #13
        public void does_not_allow_not_completed_not_last_chunks()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 4000, chunkSize: 1000);

            using (var db = new TFChunkDb(config)) {
                DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
                DbUtil.CreateOngoingChunk(config, 1, GetFilePathFor("chunk-000001.000000"));
                DbUtil.CreateOngoingChunk(config, 2, GetFilePathFor("chunk-000002.000000"));
                DbUtil.CreateOngoingChunk(config, 3, GetFilePathFor("chunk-000003.000000"));
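                // chunks #1-#3 are all ongoing, but only the last chunk may be left incomplete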
                Assert.That(() => db.Open(verifyHash: false),
                            Throws.Exception.InstanceOf <CorruptDatabaseException>()
                            .With.InnerException.InstanceOf <BadChunkInDatabaseException>());
            }
        }
        public void when_checkpoint_is_on_boundary_of_new_chunk_and_last_chunk_is_truncated_but_not_completed_exception_is_thrown()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 200, chunkSize: 100);

            using (var db = new TFChunkDb(config))
            {
                DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
                DbUtil.CreateOngoingChunk(config, 1, GetFilePathFor("chunk-000001.000001"), actualSize: config.ChunkSize - 10);
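                // the checkpoint sits on a chunk boundary, but the last chunk is truncated and was never completed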

                Assert.That(() => db.Open(verifyHash: false),
                            Throws.Exception.InstanceOf <CorruptDatabaseException>()
                            .With.InnerException.InstanceOf <BadChunkInDatabaseException>());
            }
        }
        public void allows_next_new_chunk_when_checksum_is_exactly_in_between_two_chunks_if_last_is_ongoing_chunk()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 20000);

            using (var db = new TFChunkDb(config)) {
                DbUtil.CreateMultiChunk(config, 0, 1, GetFilePathFor("chunk-000000.000001"));
                DbUtil.CreateOngoingChunk(config, 2, GetFilePathFor("chunk-000002.000000"));
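                // the multi-chunk file covers chunks #0-#1 and the checkpoint sits exactly at the start of the ongoing chunk #2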
                Assert.DoesNotThrow(() => db.Open(verifyHash: false));

                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000001")));
                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000002.000000")));
                Assert.AreEqual(2, Directory.GetFiles(PathName, "*").Length);
            }
        }
Example #16
        public void does_not_allow_next_new_chunk_when_checksum_is_exactly_in_between_two_chunks_and_last_is_multi_chunk()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 10000);

            using (var db = new TFChunkDb(config))
            {
                DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
                DbUtil.CreateMultiChunk(config, 1, 2, GetFilePathFor("chunk-000001.000000"));
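                // the checkpoint sits exactly between chunks #0 and #1, so a completed multi-chunk starting at #1 is not allowed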

                Assert.That(() => db.Open(verifyHash: false),
                            Throws.Exception.InstanceOf <CorruptDatabaseException>()
                            .With.InnerException.InstanceOf <BadChunkInDatabaseException>());
            }
        }
Example #17
        private void ReOpenDb()
        {
            Db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, WriterCheckpoint, ChaserCheckpoint));

            Db.Open();

            var indexDirectory = GetFilePathFor("index");

            _logFormat = LogFormatHelper <TLogFormat, TStreamId> .LogFormatFactory.Create(new() {
                IndexDirectory = indexDirectory,
            });

            var readers = new ObjectPool <ITransactionFileReader>("Readers", 2, 5,
                                                                  () => new TFChunkReader(Db, Db.Config.WriterCheckpoint));
            var lowHasher     = _logFormat.LowHasher;
            var highHasher    = _logFormat.HighHasher;
            var emptyStreamId = _logFormat.EmptyStreamId;

            TableIndex = new TableIndex <TStreamId>(indexDirectory, lowHasher, highHasher, emptyStreamId,
                                                    () => new HashListMemTable(PTableVersions.IndexV3, MaxEntriesInMemTable * 2),
                                                    () => new TFReaderLease(readers),
                                                    PTableVersions.IndexV3,
                                                    int.MaxValue,
                                                    Constants.PTableMaxReaderCountDefault,
                                                    MaxEntriesInMemTable);
            _logFormat.StreamNamesProvider.SetTableIndex(TableIndex);
            var readIndex = new ReadIndex <TStreamId>(new NoopPublisher(),
                                                      readers,
                                                      TableIndex,
                                                      _logFormat.StreamNameIndexConfirmer,
                                                      _logFormat.StreamIds,
                                                      _logFormat.StreamNamesProvider,
                                                      _logFormat.EmptyStreamId,
                                                      _logFormat.StreamIdValidator,
                                                      _logFormat.StreamIdSizer,
                                                      _logFormat.StreamExistenceFilter,
                                                      _logFormat.StreamExistenceFilterReader,
                                                      _logFormat.EventTypeIndexConfirmer,
                                                      streamInfoCacheCapacity: 0,
                                                      additionalCommitChecks: true,
                                                      metastreamMaxCount: MetastreamMaxCount,
                                                      hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
                                                      skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault,
                                                      replicationCheckpoint: Db.Config.ReplicationCheckpoint,
                                                      indexCheckpoint: Db.Config.IndexCheckpoint);

            readIndex.IndexCommitter.Init(ChaserCheckpoint.Read());
            ReadIndex = readIndex;
        }
Example #18
        public override void TestFixtureSetUp()
        {
            base.TestFixtureSetUp();
            string dbPath = Path.Combine(PathName, string.Format("mini-node-db-{0}", Guid.NewGuid()));

            Bus          = new InMemoryBus("bus");
            IODispatcher = new IODispatcher(Bus, new PublishEnvelope(Bus));

            if (!Directory.Exists(dbPath))
            {
                Directory.CreateDirectory(dbPath);
            }

            var writerCheckFilename = Path.Combine(dbPath, Checkpoint.Writer + ".chk");
            var chaserCheckFilename = Path.Combine(dbPath, Checkpoint.Chaser + ".chk");

            if (Runtime.IsMono)
            {
                WriterCheckpoint = new FileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
                ChaserCheckpoint = new FileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);
            }
            else
            {
                WriterCheckpoint = new MemoryMappedFileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
                ChaserCheckpoint = new MemoryMappedFileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);
            }

            Db = new TFChunkDb(TFChunkHelper.CreateDbConfig(dbPath, WriterCheckpoint, ChaserCheckpoint,
                                                            TFConsts.ChunkSize));
            Db.Open();

            // create DB
            Writer = new TFChunkWriter(Db);
            Writer.Open();
            WriteTestScenario();

            Writer.Close();
            Writer = null;
            WriterCheckpoint.Flush();
            ChaserCheckpoint.Write(WriterCheckpoint.Read());
            ChaserCheckpoint.Flush();
            Db.Close();

            // start node with our created DB
            Node = new MiniNode(PathName, inMemDb: false, dbPath: dbPath);
            Node.Start();

            Given();
        }
        public void allows_with_exactly_enough_file_to_reach_checksum_while_last_is_multi_chunk()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 30000);

            using (var db = new TFChunkDb(config)) {
                DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
                DbUtil.CreateMultiChunk(config, 1, 2, GetFilePathFor("chunk-000001.000001"));
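                // chunk #0 plus the multi-chunk #1-#2 exactly reach the checkpoint; a new ongoing chunk #3 is created on open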
                Assert.DoesNotThrow(() => db.Open(verifyHash: false));

                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000001.000001")));
                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000003.000000")));
                Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);
            }
        }
        private StandardComponents CreateStandardComponents()
        {
            var db        = new TFChunkDb(TFChunkHelper.CreateDbConfig(Path.GetTempPath(), 0));
            var mainQueue = QueuedHandler.CreateQueuedHandler(
                new AdHocHandler <Message>(msg => { /* Ignore messages */ }),
                "MainQueue", new QueueStatsManager());
            var mainBus = new InMemoryBus("mainBus");
            var threadBasedScheduler = new ThreadBasedScheduler(new RealTimeProvider(), new QueueStatsManager());
            var timerService         = new TimerService(threadBasedScheduler);

            return(new StandardComponents(db, mainQueue, mainBus,
                                          timerService, timeProvider: null, httpForwarder: null, httpServices: new IHttpService[] { },
                                          networkSendService: null, queueStatsManager: new QueueStatsManager()));
        }
Example #21
        public void allows_checkpoint_to_point_into_the_middle_of_completed_chunk_when_enough_actual_data_in_chunk()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 1500, chunkSize: 1000);

            using (var db = new TFChunkDb(config)) {
                DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
                DbUtil.CreateSingleChunk(config, 1, GetFilePathFor("chunk-000001.000001"), actualDataSize: 500);
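                // the checkpoint (1500) points 500 bytes into chunk #1, which holds exactly 500 bytes of actual data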

                Assert.DoesNotThrow(() => db.Open(verifyHash: false));

                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000001.000001")));
                Assert.AreEqual(2, Directory.GetFiles(PathName, "*").Length);
            }
        }
        public void when_checkpoint_is_exactly_on_the_boundary_of_chunk_the_last_chunk_could_be_not_present_but_should_be_created()
        {
            var config = TFChunkHelper.CreateSizedDbConfig(PathName, 200, chunkSize: 100);

            using (var db = new TFChunkDb(config)) {
                DbUtil.CreateMultiChunk(config, 0, 1, GetFilePathFor("chunk-000000.000001"));
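                // only the multi-chunk file covering chunks #0-#1 is on disk; chunk #2 is expected to be created on open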

                Assert.DoesNotThrow(() => db.Open(verifyHash: false));
                Assert.IsNotNull(db.Manager.GetChunk(2));

                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000001")));
                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000002.000000")));
                Assert.AreEqual(2, Directory.GetFiles(PathName, "*").Length);
            }
        }
Example #23
        public override async Task TestFixtureSetUp()
        {
            await base.TestFixtureSetUp();

            _mainBus = new InMemoryBus(nameof(when_having_an_epoch_manager_and_empty_tf_log <TLogFormat, TStreamId>));
            _mainBus.Subscribe(new AdHocHandler <SystemMessage.EpochWritten>(m => _published.Add(m)));
            _db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0));
            _db.Open();
            _reader = new TFChunkReader(_db, _db.Config.WriterCheckpoint);
            _writer = new TFChunkWriter(_db);

            _epochManager = GetManager();
            _epochManager.Init();
            _cache = GetCache(_epochManager);
            Assert.NotNull(_cache);
        }
        public void allows_last_chunk_to_be_multichunk_when_checkpoint_point_at_the_start_of_next_chunk()
        {
            var config = TFChunkHelper.CreateSizedDbConfig(PathName, 4000, chunkSize: 1000);

            using (var db = new TFChunkDb(config)) {
                DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
                DbUtil.CreateMultiChunk(config, 1, 3, GetFilePathFor("chunk-000001.000001"));
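                // the multi-chunk file covers chunks #1-#3 and the checkpoint (4000) points at the start of chunk #4, which is created on open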

                Assert.DoesNotThrow(() => db.Open(verifyHash: false));

                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000001.000001")));
                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000004.000000")));
                Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);
            }
        }
        public override async Task TestFixtureSetUp()
        {
            await base.TestFixtureSetUp();

            var dbConfig         = TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 1024 * 1024);
            var dbCreationHelper = new TFChunkDbCreationHelper <TLogFormat, TStreamId>(dbConfig);

            _dbResult    = CreateDb(dbCreationHelper);
            _keptRecords = KeptRecords(_dbResult);

            _dbResult.Db.Config.WriterCheckpoint.Flush();
            _dbResult.Db.Config.ChaserCheckpoint.Write(_dbResult.Db.Config.WriterCheckpoint.Read());
            _dbResult.Db.Config.ChaserCheckpoint.Flush();

            var logFormat  = LogFormatHelper <TLogFormat, TStreamId> .LogFormat;
            var indexPath  = Path.Combine(PathName, "index");
            var readerPool = new ObjectPool <ITransactionFileReader>(
                "ReadIndex readers pool", Constants.PTableInitialReaderCount, Constants.PTableMaxReaderCountDefault,
                () => new TFChunkReader(_dbResult.Db, _dbResult.Db.Config.WriterCheckpoint));
            var lowHasher     = logFormat.LowHasher;
            var highHasher    = logFormat.HighHasher;
            var emptyStreamId = logFormat.EmptyStreamId;
            var tableIndex    = new TableIndex <TStreamId>(indexPath, lowHasher, highHasher, emptyStreamId,
                                                           () => new HashListMemTable(PTableVersions.IndexV3, maxSize: 200),
                                                           () => new TFReaderLease(readerPool),
                                                           PTableVersions.IndexV3,
                                                           5, Constants.PTableMaxReaderCountDefault,
                                                           maxSizeForMemory: 100,
                                                           maxTablesPerLevel: 2);
            var readIndex = new ReadIndex <TStreamId>(new NoopPublisher(), readerPool, tableIndex,
                                                      logFormat.StreamIds,
                                                      logFormat.StreamNamesProvider,
                                                      logFormat.EmptyStreamId,
                                                      logFormat.StreamIdValidator,
                                                      logFormat.StreamIdSizer,
                                                      100, true, _metastreamMaxCount,
                                                      Opts.HashCollisionReadLimitDefault, Opts.SkipIndexScanOnReadsDefault,
                                                      _dbResult.Db.Config.ReplicationCheckpoint, _dbResult.Db.Config.IndexCheckpoint);

            readIndex.IndexCommitter.Init(_dbResult.Db.Config.WriterCheckpoint.Read());
            ReadIndex = new TestReadIndex <TStreamId>(readIndex, logFormat.StreamNameIndex);

            var scavenger = new TFChunkScavenger <TStreamId>(_dbResult.Db, new FakeTFScavengerLog(), tableIndex, ReadIndex,
                                                             logFormat.SystemStreams,
                                                             unsafeIgnoreHardDeletes: UnsafeIgnoreHardDelete());
            await scavenger.Scavenge(alwaysKeepScavenged : true, mergeChunks : false);
        }
        public override async Task TestFixtureSetUp()
        {
            await base.TestFixtureSetUp();

            var indexDirectory = GetFilePathFor("index");

            _logFormat = LogFormatHelper <TLogFormat, TStreamId> .LogFormatFactory.Create(new() {
                IndexDirectory = indexDirectory,
            });

            _mainBus = new InMemoryBus(nameof(when_starting_having_TFLog_with_no_epochs <TLogFormat, TStreamId>));
            _mainBus.Subscribe(new AdHocHandler <SystemMessage.EpochWritten>(m => _published.Add(m)));
            _db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0));
            _db.Open();
            _reader = new TFChunkReader(_db, _db.Config.WriterCheckpoint);
            _writer = new TFChunkWriter(_db);
        }
Example #27
        public void when_checkpoint_is_on_boundary_of_chunk_last_chunk_is_preserved()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 200, chunkSize: 100);

            using (var db = new TFChunkDb(config)) {
                DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
                DbUtil.CreateSingleChunk(config, 1, GetFilePathFor("chunk-000001.000001"));
                DbUtil.CreateOngoingChunk(config, 2, GetFilePathFor("chunk-000002.000005"));

                Assert.DoesNotThrow(() => db.Open(verifyHash: false));

                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000001.000001")));
                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000002.000005")));
                Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);
            }
        }
Example #28
        public override void TestFixtureSetUp()
        {
            base.TestFixtureSetUp();

            _config = TFChunkHelper.CreateDbConfig(PathName, 11111, 5500, 5500, 5757, 1000);

            DbUtil.CreateMultiChunk(_config, 0, 2, GetFilePathFor("chunk-000000.000001"));
            DbUtil.CreateMultiChunk(_config, 0, 2, GetFilePathFor("chunk-000000.000002"));
            DbUtil.CreateMultiChunk(_config, 3, 10, GetFilePathFor("chunk-000003.000001"));
            DbUtil.CreateMultiChunk(_config, 3, 10, GetFilePathFor("chunk-000003.000002"));
            DbUtil.CreateMultiChunk(_config, 7, 8, GetFilePathFor("chunk-000007.000001"));
            DbUtil.CreateOngoingChunk(_config, 11, GetFilePathFor("chunk-000011.000000"));
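            // overlapping chunk files are laid out on disk; the truncator below rolls the database back to the truncate checkpoint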

            var truncator = new TFChunkDbTruncator(_config);

            truncator.TruncateDb(_config.TruncateCheckpoint.ReadNonFlushed());
        }
        public override async Task TestFixtureSetUp()
        {
            await base.TestFixtureSetUp();

            string dbPath = Path.Combine(PathName, string.Format("mini-node-db-{0}", Guid.NewGuid()));

            Bus          = new InMemoryBus("bus");
            IODispatcher = new IODispatcher(Bus, new PublishEnvelope(Bus));

            if (!Directory.Exists(dbPath))
            {
                Directory.CreateDirectory(dbPath);
            }

            var writerCheckFilename = Path.Combine(dbPath, Checkpoint.Writer + ".chk");
            var chaserCheckFilename = Path.Combine(dbPath, Checkpoint.Chaser + ".chk");

            WriterCheckpoint = new MemoryMappedFileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
            ChaserCheckpoint = new MemoryMappedFileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);

            Db = new TFChunkDb(TFChunkHelper.CreateDbConfig(dbPath, WriterCheckpoint, ChaserCheckpoint, TFConsts.ChunkSize));
            Db.Open();

            // create DB
            Writer = new TFChunkWriter(Db);
            Writer.Open();
            WriteTestScenario();

            Writer.Close();
            Writer = null;
            WriterCheckpoint.Flush();
            ChaserCheckpoint.Write(WriterCheckpoint.Read());
            ChaserCheckpoint.Flush();
            Db.Close();

            // start node with our created DB
            Node = new MiniNode(PathName, inMemDb: false, dbPath: dbPath);
            await Node.Start();

            try {
                await Given().WithTimeout();
            } catch (Exception ex) {
                throw new Exception("Given Failed", ex);
            }
        }
Example #30
        public void when_checkpoint_is_on_boundary_of_new_chunk_and_last_chunk_is_truncated_no_exception_is_thrown()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 300, chunkSize: 100);

            using (var db = new TFChunkDb(config))
            {
                DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
                DbUtil.CreateMultiChunk(config, 1, 2, GetFilePathFor("chunk-000001.000001"), physicalSize: 50, logicalSize: 150);
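                // chunk #1 is a truncated multi-chunk file: 50 physical bytes representing 150 logical bytes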

                Assert.DoesNotThrow(() => db.Open(verifyHash: false));
                Assert.IsNotNull(db.Manager.GetChunk(2));

                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000001.000001")));
                Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000003.000000")));
                Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);
            }
        }