public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	// File-backed scavenged chunk: finish the scavenge with an empty position
	// map, then pull the whole chunk into memory.
	_chunk = TFChunk.CreateNew(Filename, 4096, 0, 0,
		isScavenged: true,
		inMem: false,
		unbuffered: false,
		writethrough: false);
	_chunk.CompleteScavenge(new PosMap[0]);
	_chunk.CacheInMemory();
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	_chunk = TFChunkHelper.CreateNewChunk(Filename);

	// Resolve stream ids for the two test streams via the log format's name index.
	var logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormat;
	logFormat.StreamNameIndex.GetOrAddId("test", out var streamId1, out _, out _);
	logFormat.StreamNameIndex.GetOrAddId("test2", out var streamId2, out _, out _);

	// First prepare goes at log position 0.
	_prepare1 = LogRecord.Prepare(logFormat.RecordFactory, 0, _corrId, _eventId, 0, 0, streamId1, 1,
		PrepareFlags.None, "Foo", new byte[12], new byte[15], new DateTime(2000, 1, 1, 12, 0, 0));
	var firstAppend = _chunk.TryAppend(_prepare1);
	_written1 = firstAppend.Success;
	_position1 = firstAppend.OldPosition;

	// Second prepare follows immediately after the first one.
	_prepare2 = LogRecord.Prepare(logFormat.RecordFactory, firstAppend.NewPosition, _corrId, _eventId, 0, 0, streamId2, 2,
		PrepareFlags.None, "Foo2", new byte[12], new byte[15], new DateTime(2000, 1, 1, 12, 0, 0));
	var secondAppend = _chunk.TryAppend(_prepare2);
	_written2 = secondAppend.Success;
	_position2 = secondAppend.OldPosition;

	_chunk.Flush();
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	// Complete an empty, unscavenged chunk and reopen it from the finished file.
	_chunk = TFChunk.CreateNew(Filename, 4096, 0, 0,
		isScavenged: false,
		inMem: false,
		unbuffered: false,
		writethrough: false);
	_chunk.Complete();
	_testChunk = TFChunk.FromCompletedFile(Filename, true, false);
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	// Complete an empty chunk, then reopen it with hash verification enabled.
	_chunk = TFChunk.CreateNew(Filename, 4096, 0, false);
	_chunk.Complete();
	_testChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true);
}
public override void SetUp() {
	base.SetUp();
	// Acquire a reader before marking for deletion so the chunk stays alive.
	_chunk = TFChunk.CreateNew(Filename, 1000, 0, 0,
		isScavenged: false,
		inMem: false,
		unbuffered: false,
		writethrough: false);
	_reader = _chunk.AcquireReader();
	_chunk.MarkForDeletion();
}
public override void SetUp() {
	base.SetUp();
	// Acquire a reader before marking for deletion so the chunk stays alive.
	_chunk = TFChunkHelper.CreateNewChunk(Filename, 1000);
	_reader = _chunk.AcquireReader();
	_chunk.MarkForDeletion();
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	_chunk = TFChunkHelper.CreateNewChunk(Filename);

	var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
	var streamId1 = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
	var streamId2 = LogFormatHelper<TLogFormat, TStreamId>.StreamId2;
	var eventTypeId1 = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;
	var eventTypeId2 = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId2;

	// First prepare at position 0; the second is appended right after it.
	_prepare1 = LogRecord.Prepare(recordFactory, 0, _corrId, _eventId, 0, 0, streamId1, 1,
		PrepareFlags.None, eventTypeId1, new byte[12], new byte[15], new DateTime(2000, 1, 1, 12, 0, 0));
	var firstAppend = _chunk.TryAppend(_prepare1);
	_written1 = firstAppend.Success;
	_position1 = firstAppend.OldPosition;

	_prepare2 = LogRecord.Prepare(recordFactory, firstAppend.NewPosition, _corrId, _eventId, 0, 0, streamId2, 2,
		PrepareFlags.None, eventTypeId2, new byte[12], new byte[15], new DateTime(2000, 1, 1, 12, 0, 0));
	var secondAppend = _chunk.TryAppend(_prepare2);
	_written2 = secondAppend.Success;
	_position2 = secondAppend.OldPosition;

	_chunk.Flush();
}
public void is_fully_resident_in_memory_when_cached() {
	// Build a scavenged chunk with one more pos-map entry than fits in the
	// footer area, then verify it is readable once cached.
	var posMap = new List<PosMap>();
	var chunk = TFChunk.CreateNew(Filename, 1024 * 1024, 0, 0, true, false, false, false, 5, false);

	long logPos = 0;
	int entryCount = ChunkFooter.Size / PosMap.FullSize + 1;
	for (int i = 0; i < entryCount; ++i) {
		posMap.Add(new PosMap(logPos, (int)logPos));
		var appendResult = chunk.TryAppend(LogRecord.Commit(logPos, Guid.NewGuid(), logPos, 0));
		Assert.IsTrue(appendResult.Success);
		logPos = appendResult.NewPosition;
	}
	chunk.CompleteScavenge(posMap);

	chunk.CacheInMemory();
	Assert.IsTrue(chunk.IsCached);

	// The last record must be resolvable straight from the cached copy.
	var lastRead = chunk.TryReadLast();
	Assert.IsTrue(lastRead.Success);
	Assert.AreEqual(posMap[posMap.Count - 1].ActualPos, lastRead.LogRecord.LogPosition);

	chunk.MarkForDeletion();
	chunk.WaitForDestroy(1000);
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	// Scavenged chunk completed with an empty position map, cached in memory.
	_chunk = TFChunk.CreateNew(Filename, 4096, 0, 0, isScavenged: true);
	_chunk.CompleteScavenge(new PosMap[0]);
	_chunk.CacheInMemory();
}
// Creates a completed (optionally scavenged) chunk full of commit records.
// For scavenged chunks the position map built during the fill is both
// returned via <paramref name="posmap"/> and used to complete the scavenge;
// for plain chunks posmap is null and the chunk is simply completed.
private TFChunk CreateChunk(int chunkNumber, bool scavenged, out List<PosMap> posmap) {
	var map = new List<PosMap>();
	var chunk = TFChunk.CreateNew(GetFilePathFor("chunk-" + chunkNumber + "-" + Guid.NewGuid()), 1024 * 1024,
		chunkNumber, chunkNumber, scavenged, false, false, false, 5, false);
	// FIX: compute the offset with a long operand; `chunkNumber * 1024 * 1024`
	// was evaluated in 32-bit int arithmetic and overflowed for chunkNumber >= 2048.
	long offset = chunkNumber * 1024L * 1024L;
	long logPos = offset;
	for (int i = 0, n = ChunkFooter.Size / PosMap.FullSize + 1; i < n; ++i) {
		if (scavenged) {
			map.Add(new PosMap(logPos, (int)logPos));
		}
		var res = chunk.TryAppend(LogRecord.Commit(logPos, Guid.NewGuid(), logPos, 0));
		Assert.IsTrue(res.Success);
		logPos = res.NewPosition + offset;
	}
	if (scavenged) {
		posmap = map;
		chunk.CompleteScavenge(map);
	} else {
		posmap = null;
		chunk.Complete();
	}
	return chunk;
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	// Complete an empty chunk, then reopen it with hash verification enabled.
	_chunk = TFChunk.CreateNew(Filename, 4096, 0, 0, false);
	_chunk.Complete();
	_testChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true);
}
public override void SetUp() {
	base.SetUp();
	// Acquire a reader before marking for deletion so the chunk stays alive.
	_chunk = TFChunk.CreateNew(Filename, 1000, 0, 0, false);
	_reader = _chunk.AcquireReader();
	_chunk.MarkForDeletion();
}
/// <summary>
/// Pairs a chunk with the stream that will be used to read its bytes in bulk.
/// Both arguments must be non-null.
/// </summary>
internal TFChunkBulkReader(TFChunk.TFChunk chunk, Stream streamToUse) {
	Ensure.NotNull(chunk, "chunk");
	Ensure.NotNull(streamToUse, "stream");
	_chunk = chunk;
	_stream = streamToUse;
}
// Handles the master's request to (re)create a chunk during replication.
// Ignores messages for other subscriptions; discards any half-written active
// chunk and resets the framer. For a completed chunk a temp chunk is created
// to receive raw bulks; otherwise a fresh ongoing chunk is appended (failing
// replication hard if the requested start number does not match the current
// chunk count). Finally the subscription position is reset to the chunk's
// start position and acked back to the master.
public void Handle(ReplicationMessage.CreateChunk message) { if (_subscriptionId != message.SubscriptionId) { return; } if (_activeChunk != null) { _activeChunk.MarkForDeletion(); _activeChunk = null; } _framer.Reset(); if (message.IsCompletedChunk) { _activeChunk = Db.Manager.CreateTempChunk(message.ChunkHeader, message.FileSize); } else { if (message.ChunkHeader.ChunkStartNumber != Db.Manager.ChunksCount) { ReplicationFail("Received request to create a new ongoing chunk #{0}-{1}, but current chunks count is {2}.", message.ChunkHeader.ChunkStartNumber, message.ChunkHeader.ChunkEndNumber, Db.Manager.ChunksCount); } Db.Manager.AddNewChunk(message.ChunkHeader, message.FileSize); } _subscriptionPos = message.ChunkHeader.ChunkStartPosition; _ackedSubscriptionPos = _subscriptionPos; Bus.Publish(new ReplicationMessage.AckLogPosition(_subscriptionId, _ackedSubscriptionPos)); }
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	// Complete an empty chunk and reopen it without reduced file cache pressure.
	_chunk = TFChunkHelper.CreateNewChunk(Filename);
	_chunk.Complete();
	_testChunk = TFChunk.FromCompletedFile(Filename, true, false, 5, reduceFileCachePressure: false);
}
// Handles a raw (byte-level) chunk bulk coming from the master during
// replication. Validates that the bulk targets the active chunk at exactly
// the current raw writer position, appends the bytes, completes the chunk
// when the master says it is done, and acks the new subscription position
// either on chunk completion or when the un-acked window is exceeded.
public void Handle(ReplicationMessage.RawChunkBulk message) {
	if (_subscriptionId != message.SubscriptionId)
		return;
	if (_activeChunk == null)
		ReplicationFail(
			"Physical chunk bulk received, but we do not have active chunk.",
			"Physical chunk bulk received, but we do not have active chunk.");

	if (_activeChunk.ChunkHeader.ChunkStartNumber != message.ChunkStartNumber ||
		_activeChunk.ChunkHeader.ChunkEndNumber != message.ChunkEndNumber) {
		Log.Error(
			"Received RawChunkBulk for TFChunk {chunkStartNumber}-{chunkEndNumber}, but active chunk is {activeChunk}.",
			message.ChunkStartNumber, message.ChunkEndNumber, _activeChunk);
		return;
	}
	if (_activeChunk.RawWriterPosition != message.RawPosition) {
		Log.Error(
			"Received RawChunkBulk at raw pos {rawPosition} (0x{rawPosition:X}) while current writer raw pos is {rawWriterPosition} (0x{rawWriterPosition:X}).",
			message.RawPosition, message.RawPosition, _activeChunk.RawWriterPosition, _activeChunk.RawWriterPosition);
		return;
	}

	if (!_activeChunk.TryAppendRawData(message.RawBytes)) {
		// FIX: the structured format string previously contained a raw line
		// break, which is invalid in a regular C# string literal.
		ReplicationFail(
			"Could not append raw bytes to chunk {0}-{1}, raw pos: {2} (0x{3:X}), bytes length: {4} (0x{5:X}). Chunk file size: {6} (0x{7:X}).",
			"Could not append raw bytes to chunk {chunkStartNumber}-{chunkEndNumber}, raw pos: {rawPosition} (0x{rawPosition:X}), bytes length: {rawBytesLength} (0x{rawBytesLength:X}). Chunk file size: {chunkFileSize} (0x{chunkFileSize:X}).",
			message.ChunkStartNumber, message.ChunkEndNumber, message.RawPosition, message.RawPosition,
			message.RawBytes.Length, message.RawBytes.Length, _activeChunk.FileSize, _activeChunk.FileSize);
	}

	_subscriptionPos += message.RawBytes.Length;

	if (message.CompleteChunk) {
		Log.Trace("Completing raw chunk {chunkStartNumber}-{chunkEndNumber}...", message.ChunkStartNumber, message.ChunkEndNumber);
		Writer.CompleteReplicatedRawChunk(_activeChunk);

		_subscriptionPos = _activeChunk.ChunkHeader.ChunkEndPosition;
		_framer.Reset();
		_activeChunk = null;
	}

	if (message.CompleteChunk || _subscriptionPos - _ackedSubscriptionPos >= MasterReplicationService.ReplicaAckWindow) {
		_ackedSubscriptionPos = _subscriptionPos;
		Bus.Publish(new ReplicationMessage.AckLogPosition(_subscriptionId, _ackedSubscriptionPos));
	}
}
// Handles confirmation that we are subscribed to a master at a given log
// position. If the master subscribed us beyond our writer checkpoint, that is
// a replication bug and we fail hard. If it subscribed us before the writer
// checkpoint, truncation is required: the truncate checkpoint is written,
// the writer is blocked and the node is shut down for offline truncation.
// Equal positions mean we are exactly in sync and nothing needs to happen.
public void Handle(ReplicationMessage.ReplicaSubscribed message) {
	if (_activeChunk != null) {
		_activeChunk.MarkForDeletion();
		_activeChunk = null;
	}
	_framer.Reset();

	_subscriptionId = message.SubscriptionId;
	_ackedSubscriptionPos = _subscriptionPos = message.SubscriptionPosition;

	Log.Info("=== SUBSCRIBED to [{masterEndPoint},{masterId:B}] at {subscriptionPosition} (0x{subscriptionPosition:X}). SubscriptionId: {subscriptionId:B}.",
		message.MasterEndPoint, message.MasterId, message.SubscriptionPosition, message.SubscriptionPosition, message.SubscriptionId);

	var writerCheck = Db.Config.WriterCheckpoint.ReadNonFlushed();
	if (message.SubscriptionPosition > writerCheck) {
		ReplicationFail(
			"Master [{0},{1:B}] subscribed us at {2} (0x{3:X}), which is greater than our writer checkpoint {4} (0x{5:X}). REPLICATION BUG.",
			"Master [{masterEndpoint},{masterId:B}] subscribed us at {subscriptionPosition} (0x{subscriptionPosition:X}), which is greater than our writer checkpoint {writerCheckpoint} (0x{writerCheckpoint:X}). REPLICATION BUG.",
			message.MasterEndPoint, message.MasterId, message.SubscriptionPosition, message.SubscriptionPosition, writerCheck, writerCheck);
	}

	if (message.SubscriptionPosition < writerCheck) {
		Log.Info("Master [{masterEndPoint},{masterId:B}] subscribed us at {subscriptionPosition} (0x{subscriptionPosition:X}), which is less than our writer checkpoint {writerCheckpoint} (0x{writerCheckpoint:X}). TRUNCATION IS NEEDED.",
			message.MasterEndPoint, message.MasterId, message.SubscriptionPosition, message.SubscriptionPosition, writerCheck, writerCheck);

		var lastCommitPosition = _getLastCommitPosition();
		if (message.SubscriptionPosition > lastCommitPosition) {
			Log.Info("ONLINE TRUNCATION IS NEEDED. NOT IMPLEMENTED. OFFLINE TRUNCATION WILL BE PERFORMED. SHUTTING DOWN NODE.");
		} else {
			// FIX: this format string previously contained a raw line break,
			// which is invalid in a regular C# string literal.
			Log.Info("OFFLINE TRUNCATION IS NEEDED (SubscribedAt {subscriptionPosition} (0x{subscriptionPosition:X}) <= LastCommitPosition {lastCommitPosition} (0x{lastCommitPosition:X})). SHUTTING DOWN NODE.",
				message.SubscriptionPosition, message.SubscriptionPosition, lastCommitPosition, lastCommitPosition);
		}

		EpochRecord lastEpoch = EpochManager.GetLastEpoch();
		if (AreAnyCommittedRecordsTruncatedWithLastEpoch(message.SubscriptionPosition, lastEpoch, lastCommitPosition)) {
			Log.Error("Master [{masterEndPoint},{masterId:B}] subscribed us at {subscriptionPosition} (0x{subscriptionPosition:X}), which is less than our last epoch and LastCommitPosition {lastCommitPosition} (0x{lastCommitPosition:X}) >= lastEpoch.EpochPosition {lastEpochPosition} (0x{lastEpochPosition:X}). That might be bad, especially if the LastCommitPosition is way beyond EpochPosition.",
				message.MasterEndPoint, message.MasterId, message.SubscriptionPosition, message.SubscriptionPosition,
				lastCommitPosition, lastCommitPosition, lastEpoch.EpochPosition, lastEpoch.EpochPosition);
			Log.Error("ATTEMPT TO TRUNCATE EPOCH WITH COMMITTED RECORDS. THIS MAY BE BAD, BUT IT IS OK IF JUST-ELECTED MASTER FAILS IMMEDIATELY AFTER ITS ELECTION.");
		}

		Db.Config.TruncateCheckpoint.Write(message.SubscriptionPosition);
		Db.Config.TruncateCheckpoint.Flush();

		BlockWriter = true;
		Bus.Publish(new ClientMessage.RequestShutdown(exitProcess: true, shutdownHttp: true));
		return;
	}

	// subscription position == writer checkpoint
	// everything is ok
}
public void setup() {
	// Mark the chunk for deletion while a bulk reader is still out, then release it.
	chunk = TFChunk.CreateNew(_filename, 1000, 0, 0);
	var acquiredReader = chunk.AcquireReader();
	chunk.MarkForDeletion();
	acquiredReader.Release();
}
public override void SetUp() {
	base.SetUp();
	// Try to append a record into a tiny 20-byte chunk and record whether it fit.
	var record = new PrepareLogRecord(15556, _corrId, _eventId, 15556, 0, "test", 1,
		new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None, "Foo", new byte[12], new byte[15]);
	_chunk = TFChunk.CreateNew(Filename, 20, 0, false);
	_written = _chunk.TryAppend(record).Success;
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	// Append one prepare record into a fresh 4K chunk, keeping the append result.
	_record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1,
		new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None, "Foo", new byte[12], new byte[15]);
	_chunk = TFChunk.CreateNew(Filename, 4096, 0, false);
	_result = _chunk.TryAppend(_record);
}
public void Setup() {
	// Try to append a record into a tiny 20-byte chunk and record whether it fit.
	var record = new PrepareLogRecord(15556, _corrId, _eventId, 15556, 0, "test", 1,
		new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None, "Foo", new byte[12], new byte[15]);
	_chunk = TFChunk.CreateNew(_filename, 20, 0, 0);
	_written = _chunk.TryAppend(record).Success;
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	// Append one prepare record into a fresh 4K chunk, keeping the append result.
	_record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1,
		new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None, "Foo", new byte[12], new byte[15]);
	_chunk = TFChunk.CreateNew(Filename, 4096, 0, 0, false);
	_result = _chunk.TryAppend(_record);
}
public void Setup() {
	// Append one prepare record and flush the chunk to disk.
	_record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1,
		new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None, "Foo", new byte[12], new byte[15]);
	_chunk = TFChunk.CreateNew(_filename, 4096, 0, 0);
	_result = _chunk.TryAppend(_record);
	_chunk.Flush();
}
public override void SetUp() {
	base.SetUp();
	// Try to append a record into a tiny 20-byte chunk and record whether it fit.
	var record = new PrepareLogRecord(15556, _corrId, _eventId, 15556, 0, "test", 1,
		new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None, "Foo", new byte[12], new byte[15]);
	_chunk = TFChunkHelper.CreateNewChunk(Filename, 20);
	_written = _chunk.TryAppend(record).Success;
}
public override void SetUp() {
	base.SetUp();
	// Try to append a record into a tiny 20-byte chunk and record whether it fit.
	var record = new PrepareLogRecord(15556, _corrId, _eventId, 15556, 0, "test", 1,
		new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None, "Foo", new byte[12], new byte[15]);
	_chunk = TFChunk.CreateNew(Filename, 20, 0, 0,
		isScavenged: false,
		inMem: false,
		unbuffered: false,
		writethrough: false);
	_written = _chunk.TryAppend(record).Success;
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	// Complete an empty chunk and reopen it using the default reader-count limits.
	_chunk = TFChunkHelper.CreateNewChunk(Filename);
	_chunk.Complete();
	_testChunk = TFChunk.FromCompletedFile(Filename, true, false,
		Constants.TFChunkInitialReaderCountDefault,
		Constants.TFChunkMaxReaderCountDefault,
		reduceFileCachePressure: false);
}
// Builds one completed chunk containing three committed events in the
// "es-to-scavenge" stream (each prepare chained off the previous commit's
// position), advances writer and chaser checkpoints past the chunk, then runs
// a scavenge (alwaysKeepScavenged, no chunk merging) against a read index
// that reports "es-to-scavenge" as scavengeable, and finally grabs the
// resulting scavenged chunk for the tests to inspect.
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); _db = new TFChunkDb(new TFChunkDbConfig(PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"), 16 * 1024, 0, new InMemoryCheckpoint(), new InMemoryCheckpoint(), new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1))); _db.Open(); var chunk = _db.Manager.GetChunkFor(0); _p1 = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1", new byte[] { 0, 1, 2 }, new byte[] { 5, 7 }); _res1 = chunk.TryAppend(_p1); _c1 = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0); _cres1 = chunk.TryAppend(_c1); _p2 = LogRecord.SingleWrite(_cres1.NewPosition, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1", new byte[] { 0, 1, 2 }, new byte[] { 5, 7 }); _res2 = chunk.TryAppend(_p2); _c2 = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1); _cres2 = chunk.TryAppend(_c2); _p3 = LogRecord.SingleWrite(_cres2.NewPosition, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1", new byte[] { 0, 1, 2 }, new byte[] { 5, 7 }); _res3 = chunk.TryAppend(_p3); _c3 = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2); _cres3 = chunk.TryAppend(_c3); chunk.Complete(); _db.Config.WriterCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition); _db.Config.WriterCheckpoint.Flush(); _db.Config.ChaserCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition); _db.Config.ChaserCheckpoint.Flush(); var bus = new InMemoryBus("Bus"); var ioDispatcher = new IODispatcher(bus, new PublishEnvelope(bus)); var scavenger = new TFChunkScavenger(_db, ioDispatcher, new FakeTableIndex(), new FakeReadIndex(x => x == "es-to-scavenge"), Guid.NewGuid(), "fakeNodeIp"); scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false); _scavengedChunk = _db.Manager.GetChunk(0); }
public void Setup() {
	// Write one record, complete the chunk, then reopen and cache it in memory.
	_record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1,
		new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None, "Foo", new byte[12], new byte[15]);
	_chunk = TFChunk.CreateNew(_filename, 4096, 0, 0);
	_result = _chunk.TryAppend(_record);
	_chunk.Flush();
	_chunk.Complete();
	_cachedChunk = TFChunk.FromCompletedFile(_filename, verifyHash: true);
	_cachedChunk.CacheInMemory();
}
// Appends a record to the given chunk and advances the writer checkpoint to
// the record's global (chunk-relative + chunk offset) position.
// Throws if the chunk has no room for the record.
void Write(int chunkNum, TFChunk chunk, ILogRecord record, out long newPos) {
	var appendResult = chunk.TryAppend(record);
	if (!appendResult.Success)
		throw new Exception(string.Format("Could not write log record: {0}", record));
	_db.Config.WriterCheckpoint.Write(chunkNum * (long)_db.Config.ChunkSize + appendResult.NewPosition);
	newPos = _db.Config.WriterCheckpoint.ReadNonFlushed();
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
	var streamId = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
	// Append one prepare record at position 0 of a fresh chunk.
	_record = LogRecord.Prepare(recordFactory, 0, _corrId, _eventId, 0, 0, streamId, 1,
		PrepareFlags.None, "Foo", new byte[12], new byte[15], new DateTime(2000, 1, 1, 12, 0, 0));
	_chunk = TFChunkHelper.CreateNewChunk(Filename);
	_result = _chunk.TryAppend(_record);
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	// Write one record, complete the chunk, then reopen and cache it in memory.
	_record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1,
		new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None, "Foo", new byte[12], new byte[15]);
	_chunk = TFChunk.CreateNew(Filename, 4096, 0, false);
	_result = _chunk.TryAppend(_record);
	_chunk.Flush();
	_chunk.Complete();
	_cachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true);
	_cachedChunk.CacheInMemory();
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	// Write one record, complete the chunk, then reopen and cache it in memory.
	_record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1,
		new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None, "Foo", new byte[12], new byte[15]);
	_chunk = TFChunkHelper.CreateNewChunk(Filename);
	_result = _chunk.TryAppend(_record);
	_chunk.Flush();
	_chunk.Complete();
	_cachedChunk = TFChunk.FromCompletedFile(Filename,
		verifyHash: true,
		unbufferedRead: false,
		initialReaderCount: 5,
		reduceFileCachePressure: false);
	_cachedChunk.CacheInMemory();
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	// Write one record, complete the chunk, then reopen and cache it in memory.
	_record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1,
		new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None, "Foo", new byte[12], new byte[15]);
	_chunk = TFChunk.CreateNew(Filename, 4096, 0, 0,
		isScavenged: false,
		inMem: false,
		unbuffered: false,
		writethrough: false);
	_result = _chunk.TryAppend(_record);
	_chunk.Flush();
	_chunk.Complete();
	_cachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false);
	_cachedChunk.CacheInMemory();
}
// Builds one completed 16K chunk containing three committed events in the
// "es-to-scavenge" stream (each prepare chained off the previous commit's
// position, with 2KB payloads so scavenging visibly shrinks the file),
// records the original file size, advances writer and chaser checkpoints
// past the chunk, then awaits a scavenge (alwaysKeepScavenged, no merging)
// against a read index that reports the stream as scavengeable, and grabs
// the resulting scavenged chunk for the tests to inspect.
public override async Task TestFixtureSetUp() { await base.TestFixtureSetUp(); _db = new TFChunkDb(TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 16 * 1024)); _db.Open(); var chunk = _db.Manager.GetChunkFor(0); var logFormat = LogFormatHelper <TLogFormat, TStreamId> .LogFormat; var streamName = "es-to-scavenge"; logFormat.StreamNameIndex.GetOrAddId(streamName, out var streamId, out _, out _); var expectedVersion = ExpectedVersion.NoStream; _p1 = LogRecord.SingleWrite(logFormat.RecordFactory, 0, Guid.NewGuid(), Guid.NewGuid(), streamId, expectedVersion++, "et1", new byte[2048], new byte[] { 5, 7 }); _res1 = chunk.TryAppend(_p1); _c1 = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0); _cres1 = chunk.TryAppend(_c1); _p2 = LogRecord.SingleWrite(logFormat.RecordFactory, _cres1.NewPosition, Guid.NewGuid(), Guid.NewGuid(), streamId, expectedVersion++, "et1", new byte[2048], new byte[] { 5, 7 }); _res2 = chunk.TryAppend(_p2); _c2 = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1); _cres2 = chunk.TryAppend(_c2); _p3 = LogRecord.SingleWrite(logFormat.RecordFactory, _cres2.NewPosition, Guid.NewGuid(), Guid.NewGuid(), streamId, expectedVersion++, "et1", new byte[2048], new byte[] { 5, 7 }); _res3 = chunk.TryAppend(_p3); _c3 = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2); _cres3 = chunk.TryAppend(_c3); chunk.Complete(); _originalFileSize = chunk.FileSize; _db.Config.WriterCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition); _db.Config.WriterCheckpoint.Flush(); _db.Config.ChaserCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition); _db.Config.ChaserCheckpoint.Flush(); var scavenger = new TFChunkScavenger <TStreamId>(_db, new FakeTFScavengerLog(), new FakeTableIndex <TStreamId>(), new FakeReadIndex <TLogFormat, TStreamId>(x => EqualityComparer <TStreamId> .Default.Equals(x, streamId)), logFormat.SystemStreams); await scavenger.Scavenge(alwaysKeepScavenged : true, mergeChunks : false); 
_scavengedChunk = _db.Manager.GetChunk(0); }
public override void SetUp() {
	base.SetUp();
	var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
	var streamId = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
	// Try to append a record into a tiny 20-byte chunk and record whether it fit.
	var record = LogRecord.Prepare(recordFactory, 15556, _corrId, _eventId, 15556, 0, streamId, 1,
		PrepareFlags.None, "Foo", new byte[12], new byte[15], new DateTime(2000, 1, 1, 12, 0, 0));
	_chunk = TFChunkHelper.CreateNewChunk(Filename, 20);
	_written = _chunk.TryAppend(record).Success;
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	_prepare1 = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1,
		new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None, "Foo", new byte[12], new byte[15]);
	_prepare2 = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test2", 2,
		new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None, "Foo2", new byte[12], new byte[15]);
	_chunk = TFChunk.CreateNew(Filename, 4096, 0, false);

	// Append both prepares back to back and remember where each one landed.
	var firstAppend = _chunk.TryAppend(_prepare1);
	_written1 = firstAppend.Success;
	_position1 = firstAppend.OldPosition;
	var secondAppend = _chunk.TryAppend(_prepare2);
	_written2 = secondAppend.Success;
	_position2 = secondAppend.OldPosition;

	_chunk.Flush();
}
// Scavenges a single chunk: copies surviving prepares/commits into a fresh
// scavenged temp chunk, then either switches the new chunk in, or discards it
// when it would not be smaller (unless alwaysKeepScavenged is set).
private void ScavengeChunk(TFChunk.TFChunk oldChunk, bool alwaysKeepScavenged) {
	var sw = Stopwatch.StartNew();

	int chunkStartNumber = oldChunk.ChunkHeader.ChunkStartNumber;
	// BUG FIX: the end number was previously read from ChunkStartNumber, which
	// broke scavenging of merged chunks spanning multiple chunk numbers.
	int chunkEndNumber = oldChunk.ChunkHeader.ChunkEndNumber;
	long chunkStartPosition = chunkStartNumber * (long)oldChunk.ChunkHeader.ChunkSize;
	int chunkSize = oldChunk.ChunkHeader.ChunkSize;

	var tmpChunkPath = Path.Combine(_db.Config.Path, Guid.NewGuid() + ".scavenge.tmp");
	Log.Trace("Scavenging chunk #{0}-{1} ({2}) started. Temp file: {3}.",
		chunkStartNumber, chunkEndNumber, Path.GetFileName(oldChunk.FileName), Path.GetFileName(tmpChunkPath));

	TFChunk.TFChunk newChunk;
	try {
		newChunk = TFChunk.TFChunk.CreateNew(tmpChunkPath, chunkSize, chunkStartNumber, chunkEndNumber, isScavenged: true);
	} catch (IOException exc) {
		Log.ErrorException(exc, "IOException during creating new chunk for scavenging purposes. Ignoring...");
		return;
	}

	// First pass: collect commits whose transactions start within this chunk.
	var commits = new Dictionary<long, CommitInfo>();
	TraverseChunk(oldChunk,
		prepare => {
			// NOOP
		},
		commit => {
			if (commit.TransactionPosition < chunkStartPosition)
				return;
			commits.Add(commit.TransactionPosition, new CommitInfo(commit));
		});

	// Second pass: copy records that survive scavenging, building the position map.
	var positionMapping = new List<PosMap>();
	TraverseChunk(oldChunk,
		prepare => {
			if (ShouldKeepPrepare(prepare, commits)) {
				var posMap = WriteRecord(newChunk, prepare);
				positionMapping.Add(posMap);
			}
		},
		commit => {
			if (ShouldKeepCommit(commit, commits)) {
				var posMap = WriteRecord(newChunk, commit);
				positionMapping.Add(posMap);
			}
		});

	var oldSize = oldChunk.ChunkFooter.ActualChunkSize + oldChunk.ChunkFooter.MapSize + ChunkHeader.Size + ChunkFooter.Size;
	var newSize = newChunk.ActualDataSize + sizeof(ulong) * positionMapping.Count + ChunkHeader.Size + ChunkFooter.Size;

	if (!alwaysKeepScavenged && oldSize <= newSize) {
		// Consistency fix: log the file name, not the full path, matching the
		// other log calls in this method.
		Log.Trace("Scavenging of chunk #{0}-{1} ({2}) completed in {3}.\n"
		          + "Old version is kept as it is smaller.\n"
		          + "Old chunk size: {4}, scavenged size: {5}.\n"
		          + "Scavenged chunk removed.",
			chunkStartNumber, chunkEndNumber, Path.GetFileName(oldChunk.FileName), sw.Elapsed, oldSize, newSize);
		newChunk.MarkForDeletion();
	} else {
		newChunk.CompleteScavenge(positionMapping);
		var chunk = _db.Manager.SwitchChunk(newChunk, verifyHash: false, replaceChunksWithGreaterNumbers: false);
		Log.Trace("Scavenging of chunk #{0}-{1} ({2}) completed in {3}.\n"
		          + "File {4} --> {5}.\n"
		          + "Old size: {6}, new size: {7}.",
			chunkStartNumber, chunkEndNumber, Path.GetFileName(oldChunk.FileName), sw.Elapsed,
			Path.GetFileName(tmpChunkPath), Path.GetFileName(chunk.FileName), oldSize, newSize);
	}
}
// Builds one completed chunk containing three committed events in the
// "es-to-scavenge" stream (each prepare chained off the previous commit's
// position), then runs a scavenge (alwaysKeepScavenged) against a read index
// that reports "es-to-scavenge" as scavengeable and grabs the resulting
// scavenged chunk for the tests to inspect. Uses OpenVerifyAndClean and the
// epoch-checkpoint-array TFChunkDbConfig overload.
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); _db = new TFChunkDb(new TFChunkDbConfig(PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"), 16 * 1024, 0, new InMemoryCheckpoint(), new InMemoryCheckpoint(), new ICheckpoint[0])); _db.OpenVerifyAndClean(); var chunk = _db.Manager.GetChunk(0); _p1 = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1", new byte[] { 0, 1, 2 }, new byte[] { 5, 7 }); _res1 = chunk.TryAppend(_p1); _c1 = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0); _cres1 = chunk.TryAppend(_c1); _p2 = LogRecord.SingleWrite(_cres1.NewPosition, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1", new byte[] { 0, 1, 2 }, new byte[] { 5, 7 }); _res2 = chunk.TryAppend(_p2); _c2 = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1); _cres2 = chunk.TryAppend(_c2); _p3 = LogRecord.SingleWrite(_cres2.NewPosition, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1", new byte[] { 0, 1, 2 }, new byte[] { 5, 7 }); _res3 = chunk.TryAppend(_p3); _c3 = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2); _cres3 = chunk.TryAppend(_c3); chunk.Complete(); var scavenger = new TFChunkScavenger(_db, new FakeReadIndex(x => x == "es-to-scavenge")); scavenger.Scavenge(alwaysKeepScavenged: true); _scavengedChunk = _db.Manager.GetChunk(0); }
// Walks the chunk from its first record forward, dispatching each record to
// the handler matching its record type. Unknown record types are an error.
private void TraverseChunk(TFChunk.TFChunk chunk,
	Action<PrepareLogRecord> processPrepare,
	Action<CommitLogRecord> processCommit,
	Action<SystemLogRecord> processSystem) {
	for (var result = chunk.TryReadFirst();
		result.Success;
		result = chunk.TryReadClosestForward((int)result.NextPosition)) {
		var record = result.LogRecord;
		switch (record.RecordType) {
			case LogRecordType.Prepare:
				processPrepare((PrepareLogRecord)record);
				break;
			case LogRecordType.Commit:
				processCommit((CommitLogRecord)record);
				break;
			case LogRecordType.System:
				processSystem((SystemLogRecord)record);
				break;
			default:
				throw new ArgumentOutOfRangeException();
		}
	}
}
public override void SetUp() {
	base.SetUp();
	// A fresh, writable, unscavenged 1K chunk.
	_chunk = TFChunk.CreateNew(Filename, 1024, 0, false);
}
// Appends a record to the scavenged chunk and returns the position-map entry
// linking the record's chunk-local log position to its actual position in the
// new chunk. Throws if the append does not succeed.
private static PosMap WriteRecord(TFChunk.TFChunk newChunk, LogRecord record) {
	var writeResult = newChunk.TryAppend(record);
	if (!writeResult.Success)
		throw new Exception(string.Format(
			"Unable to append record during scavenging. Scavenge position: {0}, Record: {1}.",
			writeResult.OldPosition, record));
	var localLogPos = newChunk.ChunkHeader.GetLocalLogPosition(record.LogPosition);
	var actualPos = (int)writeResult.OldPosition;
	return new PosMap(localLogPos, actualPos);
}