public StorageWriter(IPublisher bus, ISubscriber subscriber, TFChunkWriter writer, IReadIndex readIndex) {
	Ensure.NotNull(bus, "bus");
	Ensure.NotNull(subscriber, "subscriber");
	Ensure.NotNull(writer, "writer");
	Ensure.NotNull(readIndex, "readIndex");

	Bus = bus;
	_subscriber = subscriber;
	ReadIndex = readIndex;

	_flushDelay = 0;
	_lastFlush = _watch.ElapsedTicks;

	Writer = writer;
	Writer.Open();

	_writerBus = new InMemoryBus("StorageWriterBus", watchSlowMsg: true, slowMsgThresholdMs: 500);
	_storageWriterQueue = new QueuedHandler(_writerBus, "StorageWriterQueue", watchSlowMsg: false);
	_storageWriterQueue.Start();

	SubscribeToMessage<SystemMessage.SystemInit>();
	SubscribeToMessage<SystemMessage.BecomeShuttingDown>();
	SubscribeToMessage<StorageMessage.WritePrepares>();
	SubscribeToMessage<StorageMessage.WriteDelete>();
	SubscribeToMessage<StorageMessage.WriteTransactionStart>();
	SubscribeToMessage<StorageMessage.WriteTransactionData>();
	SubscribeToMessage<StorageMessage.WriteTransactionPrepare>();
	SubscribeToMessage<StorageMessage.WriteCommit>();
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();

	WriterCheckpoint = new InMemoryCheckpoint(0);
	var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
	Db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		10000, 0, WriterCheckpoint, new[] { chaserchk }));
	Db.OpenVerifyAndClean();

	// create db
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;

	WriterCheckpoint.Flush();
	chaserchk.Write(WriterCheckpoint.Read());
	chaserchk.Flush();

	TableIndex = new TableIndex(Path.Combine(PathName, "index"),
		() => new HashListMemTable(),
		_maxEntriesInMemTable);
	TableIndex.Initialize();

	var reader = new TFChunkReader(Db, Db.Config.WriterCheckpoint);
	ReadIndex = new ReadIndex(new NoopPublisher(), 2,
		() => new TFChunkSequentialReader(Db, Db.Config.WriterCheckpoint, 0),
		() => reader,
		TableIndex,
		new ByLengthHasher());
	ReadIndex.Build();
}
public void try_read_does_not_cache_anything_and_returns_record_once_it_is_written_later() {
	var writerchk = new InMemoryCheckpoint(0);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		10000, 0, writerchk, new InMemoryCheckpoint()));
	db.OpenVerifyAndClean();
	var writer = new TFChunkWriter(db);
	writer.Open();

	var reader = new TFChunkSequentialReader(db, writerchk, 0);
	LogRecord record;
	Assert.IsFalse(reader.TryReadNext(out record));

	var rec = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "ES", -1, "ET", new byte[] { 7 }, null);
	long tmp;
	Assert.IsTrue(writer.Write(rec, out tmp));
	writer.Flush();
	writer.Close();

	Assert.IsTrue(reader.TryReadNext(out record));
	Assert.AreEqual(rec, record);

	reader.Close();
	db.Close();
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();
	Db = new TFChunkDb(CreateDbConfig());
	Db.Open();
	Chaser = new TFChunkChaser(Db, _writerChk, _chaserChk, false);
	Chaser.Open();
	Writer = new TFChunkWriter(Db);
	Writer.Open();

	IndexCommitter = new FakeIndexCommitterService<TStreamId>();
	EpochManager = new FakeEpochManager();

	Service = new StorageChaser<TStreamId>(
		Publisher,
		_writerChk,
		Chaser,
		IndexCommitter,
		EpochManager,
		new QueueStatsManager());
	Service.Handle(new SystemMessage.SystemStart());
	Service.Handle(new SystemMessage.SystemInit());

	Publisher.Subscribe(new AdHocHandler<StorageMessage.CommitAck>(CommitAcks.Enqueue));
	Publisher.Subscribe(new AdHocHandler<StorageMessage.PrepareAck>(PrepareAcks.Enqueue));

	When();
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();
	_mainBus = new InMemoryBus(nameof(when_having_an_epoch_manager_and_empty_tf_log<TLogFormat, TStreamId>));
	_mainBus.Subscribe(new AdHocHandler<SystemMessage.EpochWritten>(m => _published.Add(m)));
	_db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0));
	_db.Open();
	_reader = new TFChunkReader(_db, _db.Config.WriterCheckpoint);
	_writer = new TFChunkWriter(_db);

	_epochManager = GetManager();
	_epochManager.Init();
	_cache = GetCache(_epochManager);
	Assert.NotNull(_cache);
	Assert.That(_cache.Count == 0);

	_epochs = new List<EpochRecord>();
	var lastPos = 0L;
	for (int i = 0; i < 30; i++) {
		var epoch = WriteEpoch(GetNextEpoch(), lastPos, _instanceId);
		_epochs.Add(epoch);
		lastPos = epoch.EpochPosition;
	}
}
protected static void WriteSingleEvent(
	ICheckpoint writerCheckpoint,
	TFChunkWriter writer,
	string eventStreamId,
	long eventNumber,
	ReadOnlyMemory<byte> data,
	DateTime? timestamp = null,
	Uuid eventId = default,
	string eventType = "some-type") {

	var prepare = LogRecord.SingleWrite(
		writerCheckpoint.ReadNonFlushed(),
		Guid.NewGuid(),
		(eventId == default ? Uuid.NewUuid() : eventId).ToGuid(),
		eventStreamId,
		eventNumber - 1,
		eventType,
		data.ToArray(),
		null,
		timestamp);
	Assert.True(writer.Write(prepare, out _));

	var commit = LogRecord.Commit(
		writerCheckpoint.ReadNonFlushed(),
		prepare.CorrelationId,
		prepare.LogPosition,
		eventNumber);
	Assert.True(writer.Write(commit, out _));
}
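// A minimal usage sketch (not from the original sources): how a fixture's
// WriteTestScenario override might call the WriteSingleEvent helper above.
// The stream name, event numbers, and payload are illustrative assumptions.
protected override void WriteTestScenario() {
	var payload = new byte[] { 1, 2, 3 };
	// Each call writes a prepare/commit pair at the current (non-flushed) writer checkpoint position.
	WriteSingleEvent(WriterCheckpoint, Writer, "test-stream", 0, payload);
	WriteSingleEvent(WriterCheckpoint, Writer, "test-stream", 1, payload, eventType: "another-type");
}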
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();
	var indexDirectory = GetFilePathFor("index");
	_logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormatFactory.Create(new() {
		IndexDirectory = indexDirectory,
	});
	_mainBus = new InMemoryBus(nameof(when_starting_having_TFLog_with_existing_epochs<TLogFormat, TStreamId>));
	_mainBus.Subscribe(new AdHocHandler<SystemMessage.EpochWritten>(m => _published.Add(m)));
	_db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0));
	_db.Open();
	_reader = new TFChunkReader(_db, _db.Config.WriterCheckpoint);
	_writer = new TFChunkWriter(_db);

	_epochs = new List<EpochRecord>();
	var lastPos = 0L;
	for (int i = 0; i < 30; i++) {
		var epoch = WriteEpoch(GetNextEpoch(), lastPos, _instanceId);
		_epochs.Add(epoch);
		lastPos = epoch.EpochPosition;
	}
}
public void try_read_does_not_cache_anything_and_returns_record_once_it_is_written_later() {
	var writerchk = new InMemoryCheckpoint(0);
	var chaserchk = new InMemoryCheckpoint(0);
	var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));
	db.Open();

	var writer = new TFChunkWriter(db);
	writer.Open();

	var reader = new TFChunkReader(db, writerchk, 0);
	Assert.IsFalse(reader.TryReadNext().Success);

	var rec = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "ES", -1, "ET", new byte[] { 7 }, null);
	long tmp;
	Assert.IsTrue(writer.Write(rec, out tmp));
	writer.Flush();
	writer.Close();

	var res = reader.TryReadNext();
	Assert.IsTrue(res.Success);
	Assert.AreEqual(rec, res.LogRecord);

	db.Close();
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();

	WriterCheckpoint = new InMemoryCheckpoint(0);
	ChaserCheckpoint = new InMemoryCheckpoint(0);
	ReplicationCheckpoint = new InMemoryCheckpoint(-1);
	Db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, WriterCheckpoint, ChaserCheckpoint,
		replicationCheckpoint: ReplicationCheckpoint));
	Db.Open();

	// create db
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;

	WriterCheckpoint.Flush();
	ChaserCheckpoint.Write(WriterCheckpoint.Read());
	ChaserCheckpoint.Flush();

	var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 5,
		() => new TFChunkReader(Db, Db.Config.WriterCheckpoint));
	var lowHasher = new XXHashUnsafe();
	var highHasher = new Murmur3AUnsafe();
	TableIndex = new TableIndex(GetFilePathFor("index"), lowHasher, highHasher,
		() => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2),
		() => new TFReaderLease(readers),
		IndexBitnessVersion,
		int.MaxValue,
		Constants.PTableMaxReaderCountDefault,
		MaxEntriesInMemTable);
	ReadIndex = new ReadIndex(new NoopPublisher(),
		readers,
		TableIndex,
		0,
		additionalCommitChecks: PerformAdditionalCommitChecks,
		metastreamMaxCount: MetastreamMaxCount,
		hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
		skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault,
		replicationCheckpoint: Db.Config.ReplicationCheckpoint);
	ReadIndex.Init(ChaserCheckpoint.Read());

	// scavenge must run after readIndex is built
	if (_scavenge) {
		if (_completeLastChunkOnScavenge)
			Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete();
		_scavenger = new TFChunkScavenger(Db, new FakeTFScavengerLog(), TableIndex, ReadIndex);
		await _scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks);
	}
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();

	WriterCheckpoint = new InMemoryCheckpoint(0);
	ChaserCheckpoint = new InMemoryCheckpoint(0);
	Db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		10000, 0, WriterCheckpoint, ChaserCheckpoint,
		new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1)));
	Db.Open();

	// create db
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;

	WriterCheckpoint.Flush();
	ChaserCheckpoint.Write(WriterCheckpoint.Read());
	ChaserCheckpoint.Flush();

	var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 2,
		() => new TFChunkReader(Db, Db.Config.WriterCheckpoint));
	TableIndex = new TableIndex(GetFilePathFor("index"),
		() => new HashListMemTable(MaxEntriesInMemTable * 2),
		() => new TFReaderLease(readers),
		MaxEntriesInMemTable);
	var hasher = new ByLengthHasher();
	ReadIndex = new ReadIndex(new NoopPublisher(),
		readers,
		TableIndex,
		hasher,
		0,
		additionalCommitChecks: true,
		metastreamMaxCount: MetastreamMaxCount);
	ReadIndex.Init(ChaserCheckpoint.Read());

	// scavenge must run after readIndex is built
	if (_scavenge) {
		if (_completeLastChunkOnScavenge)
			Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete();
		_scavenger = new TFChunkScavenger(Db, TableIndex, hasher, ReadIndex);
		_scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks);
	}
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();

	string dbPath = Path.Combine(PathName, string.Format("mini-node-db-{0}", Guid.NewGuid()));
	Bus = new InMemoryBus("bus");
	IODispatcher = new IODispatcher(Bus, new PublishEnvelope(Bus));

	if (!Directory.Exists(dbPath)) {
		Directory.CreateDirectory(dbPath);
	}

	var writerCheckFilename = Path.Combine(dbPath, Checkpoint.Writer + ".chk");
	var chaserCheckFilename = Path.Combine(dbPath, Checkpoint.Chaser + ".chk");
	if (Runtime.IsMono) {
		WriterCheckpoint = new FileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
		ChaserCheckpoint = new FileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);
	} else {
		WriterCheckpoint = new MemoryMappedFileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
		ChaserCheckpoint = new MemoryMappedFileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);
	}

	Db = new TFChunkDb(new TFChunkDbConfig(dbPath,
		new VersionedPatternFileNamingStrategy(dbPath, "chunk-"),
		TFConsts.ChunkSize, 0, WriterCheckpoint, ChaserCheckpoint,
		new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1),
		inMemDb: false));
	Db.Open();

	// create DB
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;

	WriterCheckpoint.Flush();
	ChaserCheckpoint.Write(WriterCheckpoint.Read());
	ChaserCheckpoint.Flush();
	Db.Close();

	// start node with our created DB
	Node = new MiniNode(PathName, inMemDb: false, dbPath: dbPath);
	Node.Start();

	Given();
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();

	string dbPath = Path.Combine(PathName, string.Format("mini-node-db-{0}", Guid.NewGuid()));
	_logFormatFactory = LogFormatHelper<TLogFormat, TStreamId>.LogFormatFactory.Create(new() {
		IndexDirectory = GetFilePathFor("index"),
	});
	Bus = new InMemoryBus("bus");
	IODispatcher = new IODispatcher(Bus, new PublishEnvelope(Bus));

	if (!Directory.Exists(dbPath)) {
		Directory.CreateDirectory(dbPath);
	}

	var writerCheckFilename = Path.Combine(dbPath, Checkpoint.Writer + ".chk");
	var chaserCheckFilename = Path.Combine(dbPath, Checkpoint.Chaser + ".chk");
	WriterCheckpoint = new MemoryMappedFileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
	ChaserCheckpoint = new MemoryMappedFileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);

	Db = new TFChunkDb(TFChunkHelper.CreateDbConfig(dbPath, WriterCheckpoint, ChaserCheckpoint, TFConsts.ChunkSize));
	Db.Open();

	// create DB
	Writer = new TFChunkWriter(Db);
	Writer.Open();

	var pm = _logFormatFactory.CreatePartitionManager(
		reader: new TFChunkReader(Db, WriterCheckpoint),
		writer: Writer);
	pm.Initialize();

	WriteTestScenario();
	Writer.Close();
	Writer = null;

	WriterCheckpoint.Flush();
	ChaserCheckpoint.Write(WriterCheckpoint.Read());
	ChaserCheckpoint.Flush();
	Db.Close();

	// start node with our created DB
	Node = new MiniNode<TLogFormat, TStreamId>(PathName, inMemDb: false, dbPath: dbPath);
	await Node.Start();

	try {
		await Given().WithTimeout();
	} catch (Exception ex) {
		throw new Exception("Given Failed", ex);
	}
}
public void a_record_can_be_written() {
	var filename = GetFilePathFor("chunk-000000.000000");
	var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid());
	var chunkBytes = chunkHeader.AsByteArray();
	var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
	Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
	File.WriteAllBytes(filename, buf);

	_checkpoint = new InMemoryCheckpoint(137);
	var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint(),
		chunkSize: chunkHeader.ChunkSize));
	db.Open();

	var bytes = new byte[3994]; // this gives exactly 4097 size of record, with 3993 (rec size 4096) everything works fine!
	new Random().NextBytes(bytes);

	var writer = new TFChunkWriter(db);
	var logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormat;
	logFormat.StreamNameIndex.GetOrAddId("WorldEnding", out var streamId, out _, out _);
	var record = LogRecord.Prepare(
		factory: logFormat.RecordFactory,
		logPosition: 137,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPos: 789,
		transactionOffset: 543,
		eventStreamId: streamId,
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.SingleWrite,
		eventType: "type",
		data: bytes,
		metadata: new byte[] { 0x07, 0x17 });

	long pos;
	Assert.IsTrue(writer.Write(record, out pos));
	writer.Close();
	db.Dispose();

	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());
	using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
		filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
		var reader = new BinaryReader(filestream);
		var read = LogRecord.ReadFrom(reader, (int)reader.BaseStream.Length);
		Assert.AreEqual(record, read);
	}
}
public void a_record_can_be_written() {
	var filename = Path.Combine(PathName, "prefix.tf0");
	var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
	var chunkBytes = chunkHeader.AsByteArray();
	var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
	Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
	File.WriteAllBytes(filename, buf);

	_checkpoint = new InMemoryCheckpoint(137);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new PrefixFileNamingStrategy(PathName, "prefix.tf"),
		chunkHeader.ChunkSize, 0, _checkpoint, new ICheckpoint[0]));
	db.OpenVerifyAndClean();

	var writer = new TFChunkWriter(db);
	var record = new PrepareLogRecord(logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPosition: 0,
		transactionOffset: 0,
		eventStreamId: "WorldEnding",
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[8000],
		metadata: new byte[] { 7, 17 });
	Console.WriteLine(record.GetSizeWithLengthPrefixAndSuffix());
	Console.WriteLine(record.GetSizeWithLengthPrefixAndSuffix() + 137);

	long pos;
	Assert.IsTrue(writer.Write(record, out pos));
	writer.Close();
	db.Dispose();

	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());
	using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
		filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
		var reader = new BinaryReader(filestream);
		var read = LogRecord.ReadFrom(reader);
		Console.WriteLine(string.Join("\n", Directory.EnumerateFiles(PathName)));
		Assert.AreEqual(record, read);
	}
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();

	string dbPath = Path.Combine(PathName, string.Format("mini-node-db-{0}", Guid.NewGuid()));
	Bus = new InMemoryBus("bus");
	IODispatcher = new IODispatcher(Bus, new PublishEnvelope(Bus));

	if (!Directory.Exists(dbPath)) {
		Directory.CreateDirectory(dbPath);
	}

	var writerCheckFilename = Path.Combine(dbPath, Checkpoint.Writer + ".chk");
	var chaserCheckFilename = Path.Combine(dbPath, Checkpoint.Chaser + ".chk");
	if (Runtime.IsMono) {
		WriterCheckpoint = new FileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
		ChaserCheckpoint = new FileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);
	} else {
		WriterCheckpoint = new MemoryMappedFileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true);
		ChaserCheckpoint = new MemoryMappedFileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true);
	}

	Db = new TFChunkDb(TFChunkHelper.CreateDbConfig(dbPath, WriterCheckpoint, ChaserCheckpoint, TFConsts.ChunkSize));
	Db.Open();

	// create DB
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;

	WriterCheckpoint.Flush();
	ChaserCheckpoint.Write(WriterCheckpoint.Read());
	ChaserCheckpoint.Flush();
	Db.Close();

	// start node with our created DB
	Node = new MiniNode(PathName, inMemDb: false, dbPath: dbPath);
	await Node.Start();

	try {
		await Given().WithTimeout();
	} catch (Exception ex) {
		throw new Exception("Given Failed", ex);
	}
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();
	_mainBus = new InMemoryBus(nameof(when_starting_having_TFLog_with_no_epochs<TLogFormat, TStreamId>));
	_mainBus.Subscribe(new AdHocHandler<SystemMessage.EpochWritten>(m => _published.Add(m)));
	_db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0));
	_db.Open();
	_reader = new TFChunkReader(_db, _db.Config.WriterCheckpoint);
	_writer = new TFChunkWriter(_db);
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();
	_mainBus = new InMemoryBus(nameof(when_having_an_epoch_manager_and_empty_tf_log));
	_mainBus.Subscribe(new AdHocHandler<SystemMessage.EpochWritten>(m => _published.Add(m)));
	_db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0));
	_db.Open();
	_reader = new TFChunkReader(_db, _db.Config.WriterCheckpoint);
	_writer = new TFChunkWriter(_db);
}
public void a_record_can_be_written() {
	var filename = Path.Combine(PathName, "prefix.tf0");
	var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
	var chunkBytes = chunkHeader.AsByteArray();
	var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
	Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
	File.WriteAllBytes(filename, buf);

	_checkpoint = new InMemoryCheckpoint(137);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new PrefixFileNamingStrategy(PathName, "prefix.tf"),
		chunkHeader.ChunkSize, 0, _checkpoint, new InMemoryCheckpoint(), new ICheckpoint[0]));
	db.OpenVerifyAndClean();

	var bytes = new byte[3994]; // this gives exactly 4097 size of record, with 3993 (rec size 4096) everything works fine!
	new Random().NextBytes(bytes);

	var writer = new TFChunkWriter(db);
	var record = new PrepareLogRecord(logPosition: 123,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPosition: 789,
		transactionOffset: 543,
		eventStreamId: "WorldEnding",
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.SingleWrite,
		eventType: "type",
		data: bytes,
		metadata: new byte[] { 0x07, 0x17 });

	long pos;
	Assert.IsTrue(writer.Write(record, out pos));
	writer.Close();
	db.Dispose();

	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());
	using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
		filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
		var reader = new BinaryReader(filestream);
		var read = LogRecord.ReadFrom(reader);
		Assert.AreEqual(record, read);
	}
}
public void try_read_returns_record_when_record_bigger_than_internal_buffer() {
	var writerchk = new InMemoryCheckpoint(0);
	var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		10000, 0, writerchk, chaserchk,
		new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1)));
	db.Open();

	var recordToWrite = new PrepareLogRecord(logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPosition: 0,
		transactionOffset: 0,
		eventStreamId: "WorldEnding",
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[9000],
		metadata: new byte[] { 7, 17 });
	var writer = new TFChunkWriter(db);
	writer.Open();
	long pos;
	Assert.IsTrue(writer.Write(recordToWrite, out pos));
	writer.Close();
	writerchk.Write(recordToWrite.GetSizeWithLengthPrefixAndSuffix());

	var reader = new TFChunkChaser(db, writerchk, chaserchk);
	reader.Open();
	LogRecord record;
	var readRecord = reader.TryReadNext(out record);
	reader.Close();

	Assert.IsTrue(readRecord);
	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
	Assert.AreEqual(recordToWrite, record);

	db.Close();
}
public void try_read_returns_record_when_record_bigger_than_internal_buffer() {
	var writerchk = new InMemoryCheckpoint(0);
	var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
	var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));
	db.Open();

	var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
	var streamId = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
	var eventTypeId = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;

	var recordToWrite = LogRecord.Prepare(
		factory: recordFactory,
		logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPos: 0,
		transactionOffset: 0,
		eventStreamId: streamId,
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: eventTypeId,
		data: new byte[9000],
		metadata: new byte[] { 7, 17 });
	var writer = new TFChunkWriter(db);
	writer.Open();
	long pos;
	Assert.IsTrue(writer.Write(recordToWrite, out pos));
	writer.Close();
	writerchk.Write(recordToWrite.GetSizeWithLengthPrefixAndSuffix());

	var reader = new TFChunkChaser(db, writerchk, chaserchk, false);
	reader.Open();
	ILogRecord record;
	var readRecord = reader.TryReadNext(out record);
	reader.Close();

	Assert.IsTrue(readRecord);
	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
	Assert.AreEqual(recordToWrite, record);

	db.Close();
}
public void a_record_can_be_written() {
	var filename = Path.Combine(PathName, "prefix.tf0");
	var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
	var chunkBytes = chunkHeader.AsByteArray();
	var bytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
	Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
	File.WriteAllBytes(filename, bytes);

	_checkpoint = new InMemoryCheckpoint(137);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new PrefixFileNamingStrategy(PathName, "prefix.tf"),
		10000, 0, _checkpoint, new InMemoryCheckpoint(), new ICheckpoint[0]));
	db.OpenVerifyAndClean();
	var tf = new TFChunkWriter(db);
	var record = new PrepareLogRecord(logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		expectedVersion: 1234,
		transactionPosition: 0,
		transactionOffset: 0,
		eventStreamId: "WorldEnding",
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[] { 7, 17 });
	long tmp;
	tf.Write(record, out tmp);
	//tf.Flush();
	tf.Close();
	db.Dispose();

	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read()); //137 is fluff assigned to beginning of checkpoint
	//TODO actually read the event
	using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
		filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
		var reader = new BinaryReader(filestream);
		var read = LogRecord.ReadFrom(reader);
		Assert.AreEqual(record, read);
	}
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();

	WriterChecksum = new InMemoryCheckpoint(0);
	ChaserChecksum = new InMemoryCheckpoint(0);
	Db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		10000, 0, WriterChecksum, ChaserChecksum,
		new[] { WriterChecksum, ChaserChecksum }));
	Db.OpenVerifyAndClean();

	// create db
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;

	WriterChecksum.Flush();
	ChaserChecksum.Write(WriterChecksum.Read());
	ChaserChecksum.Flush();

	TableIndex = new TableIndex(Path.Combine(PathName, "index"),
		() => new HashListMemTable(MaxEntriesInMemTable * 2),
		MaxEntriesInMemTable);
	var reader = new TFChunkReader(Db, Db.Config.WriterCheckpoint);
	ReadIndex = new ReadIndex(new NoopPublisher(), 2,
		() => new TFChunkSequentialReader(Db, Db.Config.WriterCheckpoint, 0),
		() => reader,
		TableIndex,
		new ByLengthHasher(),
		new NoLRUCache<string, StreamCacheInfo>());
	ReadIndex.Build();

	// scavenge must run after readIndex is built
	if (_scavenge) {
		_scavenger = new TFChunkScavenger(Db, ReadIndex);
		_scavenger.Scavenge(alwaysKeepScavenged: true);
	}
}
public void a_record_can_be_written() {
	var filename = GetFilePathFor("chunk-000000.000000");
	var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, chunkId: Guid.NewGuid());
	var chunkBytes = chunkHeader.AsByteArray();
	var bytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
	Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
	File.WriteAllBytes(filename, bytes);

	_checkpoint = new InMemoryCheckpoint(137);
	var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint()));
	db.Open();

	var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
	var streamId = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
	var eventTypeId = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;

	var tf = new TFChunkWriter(db);
	var record = LogRecord.Prepare(
		factory: recordFactory,
		logPosition: _checkpoint.Read(),
		correlationId: _correlationId,
		eventId: _eventId,
		expectedVersion: 1234,
		transactionPos: 0,
		transactionOffset: 0,
		eventStreamId: streamId,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: eventTypeId,
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[] { 7, 17 });
	long tmp;
	tf.Write(record, out tmp);
	tf.Close();
	db.Dispose();

	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read()); //137 is fluff assigned to beginning of checkpoint
	using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
		filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
		var reader = new BinaryReader(filestream);
		var read = LogRecord.ReadFrom(reader, (int)reader.BaseStream.Length);
		Assert.AreEqual(record, read);
	}
}
public void try_read_returns_record_when_writerchecksum_equal() {
	var writerchk = new InMemoryCheckpoint(0);
	var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
	var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));
	db.Open();

	_logFormat.StreamNameIndex.GetOrAddId("WorldEnding", out var streamId, out _, out _);
	var recordToWrite = LogRecord.Prepare(
		factory: _logFormat.RecordFactory,
		logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPos: 0,
		transactionOffset: 0,
		eventStreamId: streamId,
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[] { 7, 17 });
	var writer = new TFChunkWriter(db);
	writer.Open();
	long pos;
	Assert.IsTrue(writer.Write(recordToWrite, out pos));
	writer.Close();
	writerchk.Write(recordToWrite.GetSizeWithLengthPrefixAndSuffix());

	var chaser = new TFChunkChaser(db, writerchk, chaserchk, false);
	chaser.Open();
	ILogRecord record;
	var readRecord = chaser.TryReadNext(out record);
	chaser.Close();

	Assert.IsTrue(readRecord);
	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
	Assert.AreEqual(recordToWrite, record);

	db.Close();
}
public void try_read_returns_record_when_writerchecksum_equal() {
	var writerchk = new InMemoryCheckpoint(0);
	var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new PrefixFileNamingStrategy(PathName, "prefix.tf"),
		10000, 0, writerchk, new[] { chaserchk }));
	db.OpenVerifyAndClean();

	var recordToWrite = new PrepareLogRecord(logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPosition: 0,
		eventStreamId: "WorldEnding",
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[] { 7, 17 });
	var writer = new TFChunkWriter(db);
	writer.Open();
	long pos;
	Assert.IsTrue(writer.Write(recordToWrite, out pos));
	writer.Close();
	writerchk.Write(recordToWrite.GetSizeWithLengthPrefix());

	var chaser = new TFChunkChaser(db, writerchk, chaserchk);
	chaser.Open();
	LogRecord record;
	var readRecord = chaser.TryReadNext(out record);
	chaser.Close();

	Assert.IsTrue(readRecord);
	Assert.AreEqual(record.GetSizeWithLengthPrefix(), chaserchk.Read());
	Assert.AreEqual(recordToWrite, record);

	db.Close();
}
public StorageWriterService(IPublisher bus, ISubscriber subscribeToBus, TimeSpan minFlushDelay,
	TFChunkDb db, TFChunkWriter writer, IIndexWriter indexWriter, IEpochManager epochManager,
	QueueStatsManager queueStatsManager) {
	Ensure.NotNull(bus, "bus");
	Ensure.NotNull(subscribeToBus, "subscribeToBus");
	Ensure.NotNull(db, "db");
	Ensure.NotNull(writer, "writer");
	Ensure.NotNull(indexWriter, "indexWriter");
	Ensure.NotNull(epochManager, "epochManager");

	Bus = bus;
	_subscribeToBus = subscribeToBus;
	Db = db;
	_indexWriter = indexWriter;
	EpochManager = epochManager;

	_minFlushDelay = minFlushDelay.TotalMilliseconds * TicksPerMs;
	_lastFlushDelay = 0;
	_lastFlushTimestamp = _watch.ElapsedTicks;

	Writer = writer;
	Writer.Open();

	_writerBus = new InMemoryBus("StorageWriterBus", watchSlowMsg: false);
	StorageWriterQueue = QueuedHandler.CreateQueuedHandler(new AdHocHandler<Message>(CommonHandle),
		"StorageWriterQueue", queueStatsManager, true, TimeSpan.FromMilliseconds(500));
	_tasks.Add(StorageWriterQueue.Start());

	SubscribeToMessage<SystemMessage.SystemInit>();
	SubscribeToMessage<SystemMessage.StateChangeMessage>();
	SubscribeToMessage<SystemMessage.WriteEpoch>();
	SubscribeToMessage<SystemMessage.WaitForChaserToCatchUp>();
	SubscribeToMessage<StorageMessage.WritePrepares>();
	SubscribeToMessage<StorageMessage.WriteDelete>();
	SubscribeToMessage<StorageMessage.WriteTransactionStart>();
	SubscribeToMessage<StorageMessage.WriteTransactionData>();
	SubscribeToMessage<StorageMessage.WriteTransactionEnd>();
	SubscribeToMessage<StorageMessage.WriteCommit>();
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();
	_mainBus = new InMemoryBus(nameof(when_having_an_epoch_manager_and_empty_tf_log<TLogFormat, TStreamId>));
	_mainBus.Subscribe(new AdHocHandler<SystemMessage.EpochWritten>(m => _published.Add(m)));
	_db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0));
	_db.Open();
	_reader = new TFChunkReader(_db, _db.Config.WriterCheckpoint);
	_writer = new TFChunkWriter(_db);

	_epochManager = GetManager();
	_epochManager.Init();
	_cache = GetCache(_epochManager);
	Assert.NotNull(_cache);
}
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();

	var indexDirectory = GetFilePathFor("index");
	_logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormatFactory.Create(new() {
		IndexDirectory = indexDirectory,
	});

	TcpSendPublisher.Subscribe(new AdHocHandler<TcpMessage.TcpSend>(msg => TcpSends.Enqueue(msg)));

	DbConfig = CreateDbConfig();
	Db = new TFChunkDb(DbConfig);
	Db.Open();
	Writer = new TFChunkWriter(Db);
	EpochManager = new EpochManager<TStreamId>(
		Publisher,
		5,
		DbConfig.EpochCheckpoint,
		Writer,
		1, 1,
		() => new TFChunkReader(Db, Db.Config.WriterCheckpoint, optimizeReadSideCache: Db.Config.OptimizeReadSideCache),
		_logFormat.RecordFactory,
		_logFormat.StreamNameIndex,
		_logFormat.EventTypeIndex,
		_logFormat.CreatePartitionManager(
			reader: new TFChunkReader(Db, Db.Config.WriterCheckpoint),
			writer: Writer),
		Guid.NewGuid());

	Service = new LeaderReplicationService(
		Publisher,
		LeaderId,
		Db,
		TcpSendPublisher,
		EpochManager,
		ClusterSize,
		false,
		new QueueStatsManager());

	Service.Handle(new SystemMessage.SystemStart());
	Service.Handle(new SystemMessage.BecomeLeader(Guid.NewGuid()));

	When();
}
public StorageWriterService(IPublisher bus, ISubscriber subscribeToBus, int minFlushDelayMs,
	TFChunkDb db, TFChunkWriter writer, IReadIndex readIndex, IEpochManager epochManager) {
	Ensure.NotNull(bus, "bus");
	Ensure.NotNull(subscribeToBus, "subscribeToBus");
	Ensure.Nonnegative(minFlushDelayMs, "minFlushDelayMs");
	Ensure.NotNull(db, "db");
	Ensure.NotNull(writer, "writer");
	Ensure.NotNull(readIndex, "readIndex");
	Ensure.NotNull(epochManager, "epochManager");

	Bus = bus;
	_subscribeToBus = subscribeToBus;
	Db = db;
	ReadIndex = readIndex;
	EpochManager = epochManager;

	_minFlushDelay = minFlushDelayMs * TicksPerMs;
	_lastFlushDelay = 0;
	_lastFlushTimestamp = _watch.ElapsedTicks;

	Writer = writer;
	Writer.Open();

	_writerBus = new InMemoryBus("StorageWriterBus", watchSlowMsg: false);
	StorageWriterQueue = new QueuedHandler(new AdHocHandler<Message>(CommonHandle),
		"StorageWriterQueue", true, TimeSpan.FromMilliseconds(500));
	StorageWriterQueue.Start();

	SubscribeToMessage<SystemMessage.SystemInit>();
	SubscribeToMessage<SystemMessage.StateChangeMessage>();
	SubscribeToMessage<SystemMessage.WriteEpoch>();
	SubscribeToMessage<SystemMessage.WaitForChaserToCatchUp>();
	SubscribeToMessage<StorageMessage.WritePrepares>();
	SubscribeToMessage<StorageMessage.WriteDelete>();
	SubscribeToMessage<StorageMessage.WriteTransactionStart>();
	SubscribeToMessage<StorageMessage.WriteTransactionData>();
	SubscribeToMessage<StorageMessage.WriteTransactionPrepare>();
	SubscribeToMessage<StorageMessage.WriteCommit>();
}
public void a_record_can_be_written() {
	_checkpoint = new InMemoryCheckpoint(0);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		1000, 0, _checkpoint, new InMemoryCheckpoint(),
		new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1)));
	db.Open();

	var tf = new TFChunkWriter(db);
	tf.Open();
	var record = new PrepareLogRecord(logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPosition: 0,
		transactionOffset: 0,
		eventStreamId: "WorldEnding",
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[] { 7, 17 });
	long tmp;
	tf.Write(record, out tmp);
	tf.Close();
	db.Dispose();

	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), _checkpoint.Read());
	using (var filestream = File.Open(GetFilePathFor("chunk-000000.000000"), FileMode.Open, FileAccess.Read)) {
		filestream.Position = ChunkHeader.Size;
		var reader = new BinaryReader(filestream);
		reader.ReadInt32();
		var read = LogRecord.ReadFrom(reader);
		Assert.AreEqual(record, read);
	}
}