public void when_checkpoint_is_on_boundary_of_new_chunk_and_last_chunk_is_truncated_no_exception_is_thrown() {
    // Writer checkpoint (300) lands exactly on a chunk boundary while the last
    // chunk on disk is a truncated multi-chunk; opening must still succeed.
    var dbConfig = TFChunkHelper.CreateSizedDbConfig(PathName, 300, chunkSize: 100);
    using (var chunkDb = new TFChunkDb(dbConfig)) {
        DbUtil.CreateSingleChunk(dbConfig, 0, GetFilePathFor("chunk-000000.000000"));
        DbUtil.CreateMultiChunk(dbConfig, 1, 2, GetFilePathFor("chunk-000001.000001"),
            physicalSize: 50, logicalSize: 150);

        Assert.DoesNotThrow(() => chunkDb.Open(verifyHash: false));

        // Chunk 2 is reachable through the manager and a fresh ongoing chunk file
        // for index 3 has been created at the boundary.
        Assert.IsNotNull(chunkDb.Manager.GetChunk(2));
        Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
        Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000001.000001")));
        Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000003.000000")));
        Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);
    }
}
public void when_in_first_extraneous_files_throws_corrupt_database_exception() {
    // Checkpoint (9000) fits inside chunk 0, so a second full chunk on disk is
    // extraneous and verification must fail with ExtraneousFileFoundException.
    var cfg = new TFChunkDbConfig(PathName,
        new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        10000,
        0,
        new InMemoryCheckpoint(9000),
        new InMemoryCheckpoint(),
        new ICheckpoint[0]);
    var database = new TFChunkDb(cfg);
    CreateChunk(Path.Combine(PathName, cfg.FileNamingStrategy.GetFilenameFor(0)), cfg.ChunkSize, cfg.ChunkSize);
    CreateChunk(Path.Combine(PathName, cfg.FileNamingStrategy.GetFilenameFor(1)), cfg.ChunkSize, cfg.ChunkSize);

    var ex = Assert.Throws<CorruptDatabaseException>(() => database.OpenVerifyAndClean(verifyHash: false));
    Assert.IsInstanceOf<ExtraneousFileFoundException>(ex.InnerException);
    database.Dispose();
}
public void allows_next_new_chunk_when_checksum_is_exactly_in_between_two_chunks() {
    // Checkpoint (10000) sits exactly at the end of chunk 0; an ongoing chunk 1
    // following it is a valid layout and the db must open.
    var cfg = new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000,
        0,
        new InMemoryCheckpoint(10000),
        new InMemoryCheckpoint(),
        new InMemoryCheckpoint(-1),
        new InMemoryCheckpoint(-1));
    using (var database = new TFChunkDb(cfg)) {
        DbUtil.CreateSingleChunk(cfg, 0, GetFilePathFor("chunk-000000.000000"));
        DbUtil.CreateOngoingChunk(cfg, 1, GetFilePathFor("chunk-000001.000000"));

        Assert.DoesNotThrow(() => database.Open(verifyHash: false));
    }
}
public void with_wrong_size_file_less_than_checksum_throws() {
    // Checkpoint (15000) reaches into chunk 1, but chunk 1 on disk is 1000 bytes
    // short of a full chunk, so verification must report a bad chunk.
    var cfg = new TFChunkDbConfig(PathName,
        new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        10000,
        0,
        new InMemoryCheckpoint(15000),
        new InMemoryCheckpoint(),
        new ICheckpoint[0]);
    var database = new TFChunkDb(cfg);
    CreateChunk(Path.Combine(PathName, cfg.FileNamingStrategy.GetFilenameFor(0)), cfg.ChunkSize, cfg.ChunkSize);
    CreateChunk(Path.Combine(PathName, cfg.FileNamingStrategy.GetFilenameFor(1)), cfg.ChunkSize - 1000, cfg.ChunkSize);

    var ex = Assert.Throws<CorruptDatabaseException>(() => database.OpenVerifyAndClean(verifyHash: false));
    Assert.IsInstanceOf<BadChunkInDatabaseException>(ex.InnerException);
    database.Dispose();
}
public void when_checkpoint_is_exactly_on_the_boundary_of_chunk_the_last_chunk_could_be_present() {
    // Checkpoint (200) is on the boundary after chunk 1; an already-present
    // ongoing chunk 2 is legal and must be kept.
    var dbConfig = TFChunkHelper.CreateSizedDbConfig(PathName, 200, chunkSize: 100);
    using (var chunkDb = new TFChunkDb(dbConfig)) {
        DbUtil.CreateSingleChunk(dbConfig, 0, GetFilePathFor("chunk-000000.000000"));
        DbUtil.CreateSingleChunk(dbConfig, 1, GetFilePathFor("chunk-000001.000001"));
        DbUtil.CreateOngoingChunk(dbConfig, 2, GetFilePathFor("chunk-000002.000000"));

        Assert.DoesNotThrow(() => chunkDb.Open(verifyHash: false));

        Assert.IsNotNull(chunkDb.Manager.GetChunk(2));
        Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
        Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000001.000001")));
        Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000002.000000")));
        Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);
    }
}
// FIX: the method was missing its `public void` modifier/return type, so it did
// not compile as a method and NUnit would never discover it as a test.
public void when_checkpoint_is_on_boundary_of_new_chunk_last_chunk_is_preserved_and_excessive_versions_are_removed_if_present() {
    // Checkpoint (200) is on the boundary after chunk 1. Two versions of chunk 2
    // exist; opening must keep the newest version (000001) and delete the older.
    var config = TFChunkHelper.CreateSizedDbConfig(PathName, 200, chunkSize: 100);
    using (var db = new TFChunkDb(config)) {
        DbUtil.CreateSingleChunk(config, 0, GetFilePathFor("chunk-000000.000000"));
        DbUtil.CreateSingleChunk(config, 1, GetFilePathFor("chunk-000001.000001"));
        DbUtil.CreateSingleChunk(config, 2, GetFilePathFor("chunk-000002.000000"));
        DbUtil.CreateOngoingChunk(config, 2, GetFilePathFor("chunk-000002.000001"));

        Assert.DoesNotThrow(() => db.Open(verifyHash: false));

        // Only the latest version of each chunk survives: 3 files total.
        Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
        Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000001.000001")));
        Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000002.000001")));
        Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);
    }
}
public void when_an_epoch_checksum_is_ahead_of_writer_checksum_throws_corrupt_database_exception() {
    // Epoch checkpoint (11) is ahead of the writer checkpoint (0), which is
    // an inconsistent state and must be rejected on open.
    var cfg = new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000,
        0,
        new InMemoryCheckpoint(0),
        new InMemoryCheckpoint(0),
        new InMemoryCheckpoint(11),
        new InMemoryCheckpoint(-1));
    using (var database = new TFChunkDb(cfg)) {
        Assert.That(() => database.Open(verifyHash: false),
            Throws.Exception.InstanceOf<CorruptDatabaseException>()
                .With.InnerException.InstanceOf<ReaderCheckpointHigherThanWriterException>());
    }
}
// Fixture: opens a 4096-byte-chunk db and appends RecordsCount records into
// chunk 0. The first RecordsCount-1 records are SingleWrite records, each
// positioned at the previous append's NewPosition (the first at 0). The last
// record is a bare Prepare whose logPosition and transactionPosition both use
// the NewPosition of the second-to-last append (_results[Length - 1 - 1]).
// Finally the chunk is flushed and the writer checkpoint is advanced to the
// last append's NewPosition. NOTE(review): presumably RecordsCount >= 2,
// otherwise _results[-1] would be indexed — confirm against the fixture.
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); _db = new TFChunkDb( TFChunkHelper.CreateDbConfig(PathName, 0, chunkSize: 4096)); _db.Open(); var chunk = _db.Manager.GetChunk(0); _records = new LogRecord[RecordsCount]; _results = new RecordWriteResult[RecordsCount]; for (int i = 0; i < _records.Length - 1; ++i) { _records[i] = LogRecord.SingleWrite( i == 0 ? 0 : _results[i - 1].NewPosition, Guid.NewGuid(), Guid.NewGuid(), "es1", ExpectedVersion.Any, "et1", new byte[] { 0, 1, 2 }, new byte[] { 5, 7 }); _results[i] = chunk.TryAppend(_records[i]); } _records[_records.Length - 1] = LogRecord.Prepare( _results[_records.Length - 1 - 1].NewPosition, Guid.NewGuid(), Guid.NewGuid(), _results[_records.Length - 1 - 1].NewPosition, 0, "es1", ExpectedVersion.Any, PrepareFlags.Data, "et1", new byte[] { 0, 1, 2 }, new byte[] { 5, 7 }); _results[_records.Length - 1] = chunk.TryAppend(_records[_records.Length - 1]); chunk.Flush(); _db.Config.WriterCheckpoint.Write(_results[RecordsCount - 1].NewPosition); _db.Config.WriterCheckpoint.Flush(); }
// Wires up the per-worker projection services: the shared output bus, the
// subscription/IO dispatchers, the reader core service, the feed reader and —
// when projections are enabled at System level or above — the core service.
public ProjectionWorkerNode(
    Guid workerId,
    TFChunkDb db,
    IQueuedHandler inputQueue,
    ITimeProvider timeProvider,
    ISingletonTimeoutScheduler timeoutScheduler,
    ProjectionType runProjections,
    bool faultOutOfOrderProjections,
    IPublisher leaderOutputBus,
    ProjectionsStandardComponents configuration) {
    _runProjections = runProjections;
    Ensure.NotNull(db, "db");

    _coreOutput = new InMemoryBus("Core Output");
    _leaderOutputBus = leaderOutputBus;

    IPublisher coreOutputPublisher = CoreOutput;
    _subscriptionDispatcher = new ReaderSubscriptionDispatcher(coreOutputPublisher);
    _ioDispatcher = new IODispatcher(coreOutputPublisher, new PublishEnvelope(inputQueue), true);
    _eventReaderCoreService = new EventReaderCoreService(
        coreOutputPublisher,
        _ioDispatcher,
        10,
        db.Config.WriterCheckpoint,
        runHeadingReader: runProjections >= ProjectionType.System,
        faultOutOfOrderProjections: faultOutOfOrderProjections);

    _feedReaderService = new FeedReaderService(_subscriptionDispatcher, timeProvider);
    if (runProjections >= ProjectionType.System) {
        _projectionCoreService = new ProjectionCoreService(
            workerId,
            inputQueue,
            coreOutputPublisher,
            _subscriptionDispatcher,
            timeProvider,
            _ioDispatcher,
            timeoutScheduler,
            configuration);
    }
}
public void a_record_can_be_written() {
    // Write a single prepare record through the chunk writer, then verify that
    // the checkpoint advanced by exactly the framed record size and that the
    // record round-trips from the chunk file on disk.
    _checkpoint = new InMemoryCheckpoint(0);
    var database = new TFChunkDb(new TFChunkDbConfig(PathName,
        new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        1000,
        0,
        _checkpoint,
        new InMemoryCheckpoint(),
        new ICheckpoint[0]));
    database.OpenVerifyAndClean();

    var writer = new TFChunkWriter(database);
    writer.Open();
    var prepare = new PrepareLogRecord(logPosition: 0,
        correlationId: _correlationId,
        eventId: _eventId,
        transactionPosition: 0,
        transactionOffset: 0,
        eventStreamId: "WorldEnding",
        expectedVersion: 1234,
        timeStamp: new DateTime(2012, 12, 21),
        flags: PrepareFlags.None,
        eventType: "type",
        data: new byte[] { 1, 2, 3, 4, 5 },
        metadata: new byte[] { 7, 17 });
    long newPos;
    writer.Write(prepare, out newPos);
    writer.Close();
    database.Dispose();

    Assert.AreEqual(prepare.GetSizeWithLengthPrefixAndSuffix(), _checkpoint.Read());
    using (var filestream = File.Open(Path.Combine(PathName, "prefix.tf0"), FileMode.Open, FileAccess.Read)) {
        filestream.Position = ChunkHeader.Size;
        var reader = new BinaryReader(filestream);
        reader.ReadInt32(); // skip the length prefix
        var roundTripped = LogRecord.ReadFrom(reader);
        Assert.AreEqual(prepare, roundTripped);
    }
}
public void a_record_can_be_written() {
    // Log-format-generic variant: write one prepare record, then confirm the
    // checkpoint equals the framed record size and the record reads back equal.
    _checkpoint = new InMemoryCheckpoint(0);
    var database = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint()));
    database.Open();

    var writer = new TFChunkWriter(database);
    writer.Open();

    var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
    var streamId = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
    var eventTypeId = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;
    var prepare = LogRecord.Prepare(
        factory: recordFactory,
        logPosition: 0,
        correlationId: _correlationId,
        eventId: _eventId,
        transactionPos: 0,
        transactionOffset: 0,
        eventStreamId: streamId,
        expectedVersion: 1234,
        timeStamp: new DateTime(2012, 12, 21),
        flags: PrepareFlags.None,
        eventType: eventTypeId,
        data: new byte[] { 1, 2, 3, 4, 5 },
        metadata: new byte[] { 7, 17 });
    long newPos;
    writer.Write(prepare, out newPos);
    writer.Close();
    database.Dispose();

    Assert.AreEqual(prepare.GetSizeWithLengthPrefixAndSuffix(), _checkpoint.Read());
    using (var filestream = File.Open(GetFilePathFor("chunk-000000.000000"), FileMode.Open, FileAccess.Read)) {
        filestream.Position = ChunkHeader.Size;
        var reader = new BinaryReader(filestream);
        reader.ReadInt32(); // skip the length prefix
        var roundTripped = LogRecord.ReadFrom(reader, (int)reader.BaseStream.Length);
        Assert.AreEqual(prepare, roundTripped);
    }
}
// Plain aggregate of the node's core infrastructure components; stores each
// dependency for later retrieval, performs no validation or side effects.
public StandardComponents(
    TFChunkDb db,
    IQueuedHandler mainQueue,
    ISubscriber mainBus,
    TimerService timerService,
    ITimeProvider timeProvider,
    IHttpForwarder httpForwarder,
    HttpService[] httpServices,
    IPublisher networkSendService) {
    _db = db;
    _mainQueue = mainQueue;
    _mainBus = mainBus;
    _timerService = timerService;
    _timeProvider = timeProvider;
    _httpForwarder = httpForwarder;
    _httpServices = httpServices;
    _networkSendService = networkSendService;
}
public void with_file_of_wrong_size_database_corruption_is_detected() {
    // Chunk 0 is replaced with arbitrary text of the wrong size; opening must
    // surface a BadChunkInDatabaseException wrapped in CorruptDatabaseException.
    var cfg = new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000,
        0,
        new InMemoryCheckpoint(500),
        new InMemoryCheckpoint(),
        new InMemoryCheckpoint(-1),
        new InMemoryCheckpoint(-1));
    using (var database = new TFChunkDb(cfg)) {
        File.WriteAllText(GetFilePathFor("chunk-000000.000000"), "this is just some test blahbydy blah");

        Assert.That(() => database.Open(verifyHash: false),
            Throws.Exception.InstanceOf<CorruptDatabaseException>()
                .With.InnerException.InstanceOf<BadChunkInDatabaseException>());
    }
}
public void when_in_brand_new_extraneous_files_throws_corrupt_database_exception() {
    // A brand-new db (checkpoint 0) must reject a stray chunk file at index 4.
    var cfg = new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000,
        0,
        new InMemoryCheckpoint(0),
        new InMemoryCheckpoint(),
        new InMemoryCheckpoint(-1),
        new InMemoryCheckpoint(-1));
    using (var database = new TFChunkDb(cfg)) {
        DbUtil.CreateSingleChunk(cfg, 4, GetFilePathFor("chunk-000004.000000"));

        Assert.That(() => database.Open(verifyHash: false),
            Throws.Exception.InstanceOf<CorruptDatabaseException>()
                .With.InnerException.InstanceOf<ExtraneousFileFoundException>());
    }
}
public void does_not_allow_first_completed_chunk_when_checkpoint_is_zero() {
    // With a zero writer checkpoint, a completed chunk 0 contradicts the
    // checkpoint and must be treated as a bad chunk.
    var cfg = new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000,
        0,
        new InMemoryCheckpoint(),
        new InMemoryCheckpoint(),
        new InMemoryCheckpoint(-1),
        new InMemoryCheckpoint(-1));
    using (var database = new TFChunkDb(cfg)) {
        DbUtil.CreateSingleChunk(cfg, 0, GetFilePathFor("chunk-000000.000000"));

        Assert.That(() => database.Open(verifyHash: false),
            Throws.Exception.InstanceOf<CorruptDatabaseException>()
                .With.InnerException.InstanceOf<BadChunkInDatabaseException>());
    }
}
public void with_not_enough_files_to_reach_checksum_throws() {
    // Checkpoint (15000) points into chunk 1, but only chunk 0 exists on disk,
    // so opening must fail with ChunkNotFoundException.
    var cfg = new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000,
        0,
        new InMemoryCheckpoint(15000),
        new InMemoryCheckpoint(),
        new InMemoryCheckpoint(-1),
        new InMemoryCheckpoint(-1));
    using (var database = new TFChunkDb(cfg)) {
        DbUtil.CreateSingleChunk(cfg, 0, GetFilePathFor("chunk-000000.000000"));

        Assert.That(() => database.Open(verifyHash: false),
            Throws.Exception.InstanceOf<CorruptDatabaseException>()
                .With.InnerException.InstanceOf<ChunkNotFoundException>());
    }
}
public void try_read_returns_false_when_writer_checksum_is_zero() {
    // With nothing written (checkpoint 0), a sequential reader has no records.
    var writerCheckpoint = new InMemoryCheckpoint(0);
    var database = new TFChunkDb(new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000,
        0,
        writerCheckpoint,
        new InMemoryCheckpoint()));
    database.OpenVerifyAndClean();

    var sequentialReader = new TFChunkSequentialReader(database, writerCheckpoint, 0);
    LogRecord record;
    Assert.IsFalse(sequentialReader.TryReadNext(out record));

    database.Close();
}
public void with_a_writer_checksum_of_zero_the_first_chunk_is_created_with_correct_name() {
    // Opening an empty db must create exactly one chunk file, named by the
    // prefix strategy, sized chunk payload plus header and footer.
    var cfg = new TFChunkDbConfig(PathName,
        new PrefixFileNamingStrategy(PathName, "prefix.tf"),
        10000,
        0,
        new InMemoryCheckpoint(0),
        new ICheckpoint[0]);
    var database = new TFChunkDb(cfg);
    database.OpenVerifyAndClean();
    database.Dispose();

    Assert.AreEqual(1, Directory.GetFiles(PathName).Length);
    Assert.IsTrue(File.Exists(Path.Combine(PathName, "prefix.tf0")));
    var chunkFileInfo = new FileInfo(Path.Combine(PathName, "prefix.tf0"));
    Assert.AreEqual(10000 + ChunkHeader.Size + ChunkFooter.Size, chunkFileInfo.Length);
}
public void try_read_returns_false_when_writer_checkpoint_is_zero() {
    // With nothing written (checkpoint 0), the chaser must find no record.
    var writerCheckpoint = new InMemoryCheckpoint(0);
    var chaserCheckpoint = new InMemoryCheckpoint();
    var database = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerCheckpoint, chaserCheckpoint));
    database.Open();

    var chaser = new TFChunkChaser(database, writerCheckpoint, new InMemoryCheckpoint(), false);
    chaser.Open();
    ILogRecord record;
    Assert.IsFalse(chaser.TryReadNext(out record));
    chaser.Close();

    database.Dispose();
}
// Fixture: opens a 4096-byte-chunk db and appends RecordsCount ~1.2KB records,
// completing the current chunk and adding a new one every 3 records. `pos`
// tracks the record's log position: it is reset to the new chunk's base
// (i / 3 * ChunkSize) on each rollover and advanced by each record's framed
// size otherwise. Afterwards the writer checkpoint is set to the base of the
// final chunk ((RecordsCount / 3) * ChunkSize) plus the last append's
// NewPosition within that chunk. NOTE(review): presumably RecordsCount is a
// multiple of 3 so the final chunk arithmetic lines up — confirm.
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); _db = new TFChunkDb(new TFChunkDbConfig(PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"), 4096, 0, new InMemoryCheckpoint(), new InMemoryCheckpoint(), new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1))); _db.Open(); var chunk = _db.Manager.GetChunk(0); _records = new LogRecord[RecordsCount]; _results = new RecordWriteResult[RecordsCount]; var pos = 0; for (int i = 0; i < RecordsCount; ++i) { if (i > 0 && i % 3 == 0) { pos = i / 3 * _db.Config.ChunkSize; chunk.Complete(); chunk = _db.Manager.AddNewChunk(); } _records[i] = LogRecord.SingleWrite(pos, Guid.NewGuid(), Guid.NewGuid(), "es1", ExpectedVersion.Any, "et1", new byte[1200], new byte[] { 5, 7 }); _results[i] = chunk.TryAppend(_records[i]); pos += _records[i].GetSizeWithLengthPrefixAndSuffix(); } chunk.Flush(); _db.Config.WriterCheckpoint.Write((RecordsCount / 3) * _db.Config.ChunkSize + _results[RecordsCount - 1].NewPosition); _db.Config.WriterCheckpoint.Flush(); }
public void a_record_can_be_written() {
    // Pre-create a raw chunk file on disk, open the db with the checkpoint at
    // offset 137 inside it, write one record, and verify both the checkpoint
    // advance and the on-disk round trip at that offset.
    var chunkFileName = GetFilePathFor("chunk-000000.000000");
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, chunkId: Guid.NewGuid());
    var headerBytes = chunkHeader.AsByteArray();
    var fileBytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
    Buffer.BlockCopy(headerBytes, 0, fileBytes, 0, headerBytes.Length);
    File.WriteAllBytes(chunkFileName, fileBytes);

    _checkpoint = new InMemoryCheckpoint(137);
    var database = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint()));
    database.Open();
    var writer = new TFChunkWriter(database);
    var prepare = new PrepareLogRecord(logPosition: _checkpoint.Read(),
        correlationId: _correlationId,
        eventId: _eventId,
        expectedVersion: 1234,
        transactionPosition: 0,
        transactionOffset: 0,
        eventStreamId: "WorldEnding",
        timeStamp: new DateTime(2012, 12, 21),
        flags: PrepareFlags.None,
        eventType: "type",
        data: new byte[] { 1, 2, 3, 4, 5 },
        metadata: new byte[] { 7, 17 });
    long newPos;
    writer.Write(prepare, out newPos);
    writer.Close();
    database.Dispose();

    // 137 is the arbitrary starting offset baked into the checkpoint above.
    Assert.AreEqual(prepare.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());
    using (var filestream = File.Open(chunkFileName, FileMode.Open, FileAccess.Read)) {
        // Seek past the chunk header, the 137-byte offset and the length prefix.
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var roundTripped = LogRecord.ReadFrom(reader);
        Assert.AreEqual(prepare, roundTripped);
    }
}
public void temporary_files_are_removed() {
    // Opening the db must delete *.tmp / *.scavenge.tmp leftovers while keeping
    // chunks and unrelated files ("bla") intact.
    var dbConfig = TFChunkHelper.CreateDbConfig(PathName, 150, chunkSize: 100);
    using (var chunkDb = new TFChunkDb(dbConfig)) {
        DbUtil.CreateSingleChunk(dbConfig, 0, GetFilePathFor("chunk-000000.000000"));
        DbUtil.CreateOngoingChunk(dbConfig, 1, GetFilePathFor("chunk-000001.000001"));

        File.Create(GetFilePathFor("bla")).Close();
        File.Create(GetFilePathFor("bla.scavenge.tmp")).Close();
        File.Create(GetFilePathFor("bla.tmp")).Close();

        Assert.DoesNotThrow(() => chunkDb.Open(verifyHash: false));

        Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
        Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000001.000001")));
        Assert.IsTrue(File.Exists(GetFilePathFor("bla")));
        Assert.AreEqual(3, Directory.GetFiles(PathName, "*").Length);
    }
}
public void when_checkpoint_is_on_boundary_of_new_chunk_and_last_chunk_is_truncated_but_not_completed_exception_is_thrown() {
    // Checkpoint (200) sits on a boundary, but the last chunk is an *ongoing*
    // (uncompleted) chunk that is also truncated — that is corruption.
    var cfg = new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        100,
        0,
        new InMemoryCheckpoint(200),
        new InMemoryCheckpoint(),
        new ICheckpoint[0]);
    var database = new TFChunkDb(cfg);
    CreateChunk(Path.Combine(PathName, "chunk-000000.000000"), cfg.ChunkSize, cfg.ChunkSize);
    CreateOngoingChunk(Path.Combine(PathName, "chunk-000001.000001"), cfg.ChunkSize - 10, cfg.ChunkSize);

    var ex = Assert.Throws<CorruptDatabaseException>(() => database.OpenVerifyAndClean(verifyHash: false));
    Assert.IsInstanceOf<BadChunkInDatabaseException>(ex.InnerException);
    database.Dispose();
}
// Validates required dependencies up front, then stores all scavenger
// collaborators and settings; no other work happens at construction time.
public StorageScavenger(TFChunkDb db, IODispatcher ioDispatcher, ITableIndex tableIndex, IHasher hasher,
    IReadIndex readIndex, bool alwaysKeepScavenged, string nodeEndpoint, bool mergeChunks, int scavengeHistoryMaxAge) {
    Ensure.NotNull(db, "db");
    Ensure.NotNull(ioDispatcher, "ioDispatcher");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(hasher, "hasher");
    Ensure.NotNull(readIndex, "readIndex");
    Ensure.NotNull(nodeEndpoint, "nodeEndpoint");

    _db = db;
    _ioDispatcher = ioDispatcher;
    _tableIndex = tableIndex;
    _hasher = hasher;
    _readIndex = readIndex;
    _alwaysKeepScavenged = alwaysKeepScavenged;
    _mergeChunks = mergeChunks;
    _nodeEndpoint = nodeEndpoint;
    _scavengeHistoryMaxAge = scavengeHistoryMaxAge;
}
public void with_wrong_size_file_less_than_checksum_throws() {
    // Checkpoint (15000) reaches into chunk 1, but chunk 1's data is 1000 bytes
    // short — opening must report a bad chunk.
    var cfg = new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000,
        0,
        new InMemoryCheckpoint(15000),
        new InMemoryCheckpoint(),
        new InMemoryCheckpoint(-1),
        new InMemoryCheckpoint(-1));
    using (var database = new TFChunkDb(cfg)) {
        DbUtil.CreateSingleChunk(cfg, 0, GetFilePathFor("chunk-000000.000000"));
        DbUtil.CreateSingleChunk(cfg, 1, GetFilePathFor("chunk-000001.000000"),
            actualDataSize: cfg.ChunkSize - 1000);

        Assert.That(() => database.Open(verifyHash: false),
            Throws.Exception.InstanceOf<CorruptDatabaseException>()
                .With.InnerException.InstanceOf<BadChunkInDatabaseException>());
    }
}
public void does_not_allow_pre_last_chunk_to_be_not_completed_when_checksum_is_exactly_in_between_two_chunks_and_next_chunk_exists() {
    // Checkpoint (10000) says chunk 0 is complete, yet chunk 0 on disk is still
    // ongoing while chunk 1 exists — that is a bad chunk.
    var cfg = new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000,
        0,
        new InMemoryCheckpoint(10000),
        new InMemoryCheckpoint(),
        new InMemoryCheckpoint(-1),
        new InMemoryCheckpoint(-1));
    using (var database = new TFChunkDb(cfg)) {
        DbUtil.CreateOngoingChunk(cfg, 0, GetFilePathFor("chunk-000000.000000"));
        DbUtil.CreateOngoingChunk(cfg, 1, GetFilePathFor("chunk-000001.000000"));

        Assert.That(() => database.Open(verifyHash: false),
            Throws.Exception.InstanceOf<CorruptDatabaseException>()
                .With.InnerException.InstanceOf<BadChunkInDatabaseException>());
    }
}
// Builds the single-node: resolves the db path, constructs the chunk db and
// node settings, then starts projections unless they were disabled.
protected override void Create(SingleNodeOptions options) {
    var dbPath = ResolveDbPath(options.DbPath, options.HttpPort);
    var db = new TFChunkDb(CreateDbConfig(dbPath, options.ChunksToCache));
    var vnodeSettings = GetVNodeSettings(options);
    var appSettings = new SingleVNodeAppSettings(TimeSpan.FromSeconds(options.StatsPeriodSec));
    var dbVerifyHashes = !options.DoNotVerifyDbHashesOnStartup;

    _node = new SingleVNode(db, vnodeSettings, appSettings, dbVerifyHashes);

    if (!options.NoProjections) {
        _projections = new Projections.Core.Projections(db,
            _node.MainQueue,
            _node.Bus,
            _node.TimerService,
            _node.HttpService,
            options.ProjectionThreads);
    }
}
// Fixture: opens a db, subscribes a collector for EpochWritten messages, and
// writes a chain of 30 epochs where each epoch's previous position is the
// position of the epoch before it.
public override async Task TestFixtureSetUp() {
    await base.TestFixtureSetUp();
    _mainBus = new InMemoryBus(nameof(when_starting_having_TFLog_with_existing_epochs<TLogFormat, TStreamId>));
    _mainBus.Subscribe(new AdHocHandler<SystemMessage.EpochWritten>(m => _published.Add(m)));
    _db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0));
    _db.Open();

    _reader = new TFChunkReader(_db, _db.Config.WriterCheckpoint);
    _writer = new TFChunkWriter(_db);
    _epochs = new List<EpochRecord>();

    var previousEpochPosition = 0L;
    for (int i = 0; i < 30; i++) {
        var epoch = WriteEpoch(GetNextEpoch(), previousEpochPosition, _instanceId);
        _epochs.Add(epoch);
        previousEpochPosition = epoch.EpochPosition;
    }
}
// Cluster-aware storage writer: adds replication handling on top of the base
// writer. Sets up a length-prefix framer for incoming replicated data and
// subscribes to the replication messages this service processes.
public ClusterStorageWriterService(IPublisher bus,
    ISubscriber subscribeToBus,
    TimeSpan minFlushDelay,
    TFChunkDb db,
    TFChunkWriter writer,
    IIndexWriter indexWriter,
    IEpochManager epochManager,
    Func<long> getLastCommitPosition)
    : base(bus, subscribeToBus, minFlushDelay, db, writer, indexWriter, epochManager) {
    Ensure.NotNull(getLastCommitPosition, "getLastCommitPosition");
    _getLastCommitPosition = getLastCommitPosition;
    _framer = new LengthPrefixSuffixFramer(OnLogRecordUnframed, TFConsts.MaxLogRecordSize);

    SubscribeToMessage<ReplicationMessage.ReplicaSubscribed>();
    SubscribeToMessage<ReplicationMessage.CreateChunk>();
    SubscribeToMessage<ReplicationMessage.RawChunkBulk>();
    SubscribeToMessage<ReplicationMessage.DataChunkBulk>();
}
public void try_read_returns_false_when_writer_checksum_is_zero() {
    // With nothing written (checkpoint 0), the reader's first read must fail.
    var writerCheckpoint = new InMemoryCheckpoint(0);
    var database = new TFChunkDb(new TFChunkDbConfig(PathName,
        new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
        10000,
        0,
        writerCheckpoint,
        new InMemoryCheckpoint(),
        new InMemoryCheckpoint(-1),
        new InMemoryCheckpoint(-1)));
    database.Open();

    var chunkReader = new TFChunkReader(database, writerCheckpoint, 0);
    Assert.IsFalse(chunkReader.TryReadNext().Success);

    database.Close();
}