// Creates a bulk reader over the given chunk using an externally supplied stream.
// NOTE(review): ownership of streamToUse (who disposes it) is not visible here -- confirm.
internal TFChunkBulkReader(TFChunk chunk, Stream streamToUse) {
    Ensure.NotNull(chunk, "chunk");
    Ensure.NotNull(streamToUse, "stream");
    _chunk = chunk;
    _stream = streamToUse;
}
// Registers a newly created chunk at the next chunk number and applies the
// caching policy: the chunk falling out of the cached window is queued for
// uncaching, and the new chunk is cached inline (ongoing) or queued (completed).
// FIX: removed the redundant nested 'if (_cachingEnabled)' -- it re-tested the
// condition already guaranteed by the enclosing block.
public void AddChunk(TFChunk chunk) {
    Ensure.NotNull(chunk, "chunk");
    _chunks[_chunksCount] = chunk;
    _chunksCount += 1;
    if (_cachingEnabled) {
        // Chunk that just left the window of CachedChunkCount most recent chunks.
        int uncacheIndex = _chunksCount - _config.CachedChunkCount - 1;
        if (uncacheIndex >= 0) {
            _chunksQueue.Enqueue(_chunks[uncacheIndex]);
            EnsureBackgroundWorkerRunning();
        }
        // Ongoing chunks are (un)cached synchronously; completed (read-only)
        // chunks go through the background worker.
        if (!chunk.IsReadOnly) {
            CacheUncacheIfNecessary(chunk);
        } else {
            _chunksQueue.Enqueue(chunk);
            EnsureBackgroundWorkerRunning();
        }
    }
}
// Test setup: creates a 20-byte chunk (far too small for any record) and records
// whether appending a prepare record succeeded -- presumably expected to fail.
public void Setup() {
    // 15556 appears to be an arbitrary log/transaction position -- TODO confirm.
    var record = new PrepareLogRecord(15556, _corrId, _eventId, 15556, 0, "test", 1,
                                      new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None,
                                      "Foo", new byte[12], new byte[15]);
    _chunk = TFChunk.CreateNew(_filename, 20, 0, 0);
    _written = _chunk.TryAppend(record).Success;
}
// Test setup: acquires a reader, marks the chunk for deletion while the reader
// is still held, then releases the reader (deletion should proceed only after release).
public void setup() {
    chunk = TFChunk.CreateNew(_filename, 1000, 0, 0);
    var reader = chunk.AcquireReader();
    chunk.MarkForDeletion();
    reader.Release();
}
// Appends a record to the current chunk; on overflow seals the chunk and rolls
// over to a fresh one -- the record is NOT written in that case, so the caller
// is expected to retry. Outputs the new writer position; returns append success.
public bool Write(LogRecord record, out long newPos) {
    var chunkNum = (int)(_writerPos / _db.Config.ChunkSize);
    var chunkPos = _writerPos % _db.Config.ChunkSize;
    var result = _writerChunk.TryAppend(record);
    if (result.Success) {
        Debug.Assert(result.OldPosition == chunkPos);
        _writerPos = chunkNum * (long)_db.Config.ChunkSize + result.NewPosition;
    } else {
        // Chunk is full: seal it and move the writer to the next chunk's start.
        _writerChunk.Flush();
        _writerChunk.Complete();
        _writerChunk = _db.Manager.AddNewChunk();
        //_writerCheckpoint.Flush(); //flush our checkpoint
        // NOTE(review): a sibling version of this method DOES flush the checkpoint
        // after Complete() -- confirm which behavior is intended.
        _writerPos = _writerChunk.ChunkHeader.ChunkStartNumber * (long)_db.Config.ChunkSize; // we just moved to a new chunk at pos 0
        //GFY CANT USE chunkNum here (it could be exact at end)
    }
    _writerCheckpoint.Write(_writerPos);
    newPos = _writerPos;
    return(result.Success);
}
// Opens the last (still being written) chunk, positioned at the in-chunk offset
// derived from the writer checkpoint.
private TFChunk LoadLastChunk(string chunkFileName) {
    var positionInChunk = (int)(Config.WriterCheckpoint.Read() % Config.ChunkSize);
    return TFChunk.FromOngoingFile(chunkFileName, positionInChunk);
}
// Atomically replaces the chunk at the given number with a (scavenged)
// replacement, uncaches the old chunk, applies the caching policy to the new
// one, and returns the old chunk to the caller.
// NOTE(review): oldChunk is dereferenced without a null check -- assumes the
// slot is always populated when a swap is requested; confirm callers guarantee this.
public TFChunk SwapChunk(int chunkNumber, TFChunk newChunk) {
    var oldChunk = Interlocked.Exchange(ref _chunks[chunkNumber], newChunk);
    oldChunk.UnCacheFromMemory();
    TryCacheChunk(newChunk);
    return(oldChunk);
}
// Test setup: appends a single prepare record to a fresh 4096-byte chunk and
// flushes, keeping the append result for assertions.
public void Setup() {
    _record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1,
                                   new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None,
                                   "Foo", new byte[12], new byte[15]);
    _chunk = TFChunk.CreateNew(_filename, 4096, 0, 0);
    _result = _chunk.TryAppend(_record);
    _chunk.Flush();
}
// Creates the next ongoing chunk (number == current chunk count, scavenge
// version 0), registers it, and returns it.
public TFChunk AddNewChunk() {
    var number = _chunksCount;
    var name = _config.FileNamingStrategy.GetFilenameFor(number);
    var newChunk = TFChunk.CreateNew(name, _config.ChunkSize, number, 0);
    AddChunk(newChunk);
    return newChunk;
}
// Seals the current chunk and rolls the writer over to a fresh chunk.
// Order matters: flush+complete the chunk, flush the checkpoint, then create the
// new chunk and advance the checkpoint to its start position.
public void CompleteChunk() {
    _writerChunk.Flush();
    _writerChunk.Complete();
    _writerCheckpoint.Flush(); //flush our checkpoint
    _writerChunk = _db.Manager.AddNewChunk();
    _writerPos = _writerChunk.ChunkHeader.ChunkStartNumber * (long)_db.Config.ChunkSize;
    _writerCheckpoint.Write(_writerPos);
}
// Test setup: writes and flushes one record, completes the chunk, then re-opens
// it as a completed chunk (with hash verification) and caches it fully in memory.
public void Setup() {
    _record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1,
                                   new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None,
                                   "Foo", new byte[12], new byte[15]);
    _chunk = TFChunk.CreateNew(_filename, 4096, 0, 0);
    _result = _chunk.TryAppend(_record);
    _chunk.Flush();
    _chunk.Complete();
    _cachedChunk = TFChunk.FromCompletedFile(_filename, verifyHash: true);
    _cachedChunk.CacheInMemory();
}
// Wires the writer to the database's writer checkpoint and positions it on the
// chunk containing the checkpointed position; fails fast if no such chunk exists.
public TFChunkWriter(TFChunkDb db) {
    Ensure.NotNull(db, "db");
    _db = db;
    _writerCheckpoint = db.Config.WriterCheckpoint;
    _writerPos = _writerCheckpoint.Read();
    _writerChunk = db.Manager.GetChunkFor(_writerPos);
    if (_writerChunk == null) {
        throw new InvalidOperationException("No chunk given for existing position.");
    }
}
// Switches in a fully replicated, completed chunk: renames its file per the
// naming strategy, publishes it under every chunk number it spans, and marks
// the chunks it replaced for deletion.
public void AddReplicatedChunk(TFChunk replicatedChunk, bool verifyHash) {
    Ensure.NotNull(replicatedChunk, "replicatedChunk");
    if (!replicatedChunk.IsReadOnly) {
        throw new ArgumentException(string.Format("Passed TFChunk is not completed: {0}.", replicatedChunk.FileName));
    }
    var chunkHeader = replicatedChunk.ChunkHeader;
    var oldFileName = replicatedChunk.FileName;
    var newFileName = _config.FileNamingStrategy.GetFilenameFor(chunkHeader.ChunkStartNumber, chunkHeader.ChunkScavengeVersion);
    // Release the chunk's file handles so its file can be moved below.
    replicatedChunk.Dispose();
    try {
        replicatedChunk.WaitForDestroy(0); // should happen immediately
    } catch (TimeoutException exc) {
        // NOTE(review): reads replicatedChunk.FileName/ChunkHeader after Dispose --
        // confirm these properties remain valid on a disposed chunk.
        throw new Exception(string.Format("Replicated chunk '{0}' ({1}-{2}) is used by someone else.",
                                          replicatedChunk.FileName,
                                          replicatedChunk.ChunkHeader.ChunkStartNumber,
                                          replicatedChunk.ChunkHeader.ChunkEndNumber), exc);
    }
    //TODO AN: temporary workaround
    // First pass: destroy the old chunks occupying the replaced range so the
    // file rename below cannot collide with their files.
    for (int i = chunkHeader.ChunkStartNumber; i <= chunkHeader.ChunkEndNumber; ++i) {
        var oldChunk = _chunks[i];
        if (oldChunk != null) {
            oldChunk.MarkForDeletion();
            oldChunk.WaitForDestroy(500);
        }
    }
    // TODO AN it is possible that chunk with the newFileName already exists, need to work around that
    // TODO AN this could be caused by scavenging... no scavenge -- no cry :(
    File.Move(oldFileName, newFileName);
    var newChunk = TFChunk.FromCompletedFile(newFileName, verifyHash);
    // Second pass: publish the new chunk under every number it spans.
    for (int i = chunkHeader.ChunkStartNumber; i <= chunkHeader.ChunkEndNumber; ++i) {
        var oldChunk = Interlocked.Exchange(ref _chunks[i], newChunk);
        if (oldChunk != null) {
            oldChunk.MarkForDeletion();
        }
    }
    _chunksCount = newChunk.ChunkHeader.ChunkEndNumber + 1;
    Debug.Assert(_chunks[_chunksCount] == null);
    TryCacheChunk(newChunk);
}
// Completes a raw replicated chunk, switches it into the database via
// AddReplicatedChunk, then rolls the writer over to a fresh ongoing chunk and
// persists the new writer position.
public void CompleteRawChunk(TFChunk rawChunk) {
    rawChunk.Flush();
    rawChunk.CompleteRaw();
    _db.Manager.AddReplicatedChunk(rawChunk, verifyHash: true);
    _writerChunk = _db.Manager.AddNewChunk();
    _writerPos = _writerChunk.ChunkHeader.ChunkStartNumber * (long)_db.Config.ChunkSize;
    _writerCheckpoint.Write(_writerPos);
    _writerCheckpoint.Flush();
}
// Opens the last chunk as ongoing, positioned where the writer checkpoint says
// writing stopped. A checkpoint landing exactly on a chunk boundary (with data
// already written) means the chunk is completely full.
// NOTE(review): the verifyHash parameter is not used by this implementation.
private TFChunk LoadLastChunk(string chunkFileName, bool verifyHash) {
    var checkpoint = Config.WriterCheckpoint.Read();
    var inChunkPos = (int)(checkpoint % Config.ChunkSize);
    if (inChunkPos == 0 && checkpoint > 0) {
        inChunkPos = Config.ChunkSize;
    }
    return TFChunk.FromOngoingFile(chunkFileName, inChunkPos, checkSize: false);
}
// Completes a raw replicated chunk and switches it into the database, removing
// chunks with greater numbers; the writer checkpoint jumps to the end of the
// switched chunk, and only then is a fresh ongoing chunk created.
public void CompleteReplicatedRawChunk(TFChunk.TFChunk rawChunk) {
    _currentChunk = null; // in case creation of new chunk fails, we shouldn't use completed chunk for write
    rawChunk.CompleteRaw();
    _db.Manager.SwitchChunk(rawChunk, verifyHash: true, removeChunksWithGreaterNumbers: true);
    _writerCheckpoint.Write(rawChunk.ChunkHeader.ChunkEndPosition);
    _writerCheckpoint.Flush();
    _currentChunk = _db.Manager.AddNewChunk();
}
// Wires the writer to the database's writer checkpoint and positions it on the
// chunk containing the checkpointed position; fails fast if no such chunk exists.
public TFChunkWriter(TFChunkDb db) {
    Ensure.NotNull(db, "db");
    _db = db;
    _writerCheckpoint = db.Config.WriterCheckpoint;
    _writerPos = _writerCheckpoint.Read();
    _writerChunk = db.Manager.GetChunkFor(_writerPos);
    if (_writerChunk == null) {
        throw new InvalidOperationException("No chunk given for existing position.");
    }
}
// Caches the chunk if caching is on, it lies within the window of the most
// recent CachedChunkCount chunks, and it is still the chunk registered at its
// number; otherwise uncaches it.
private void CacheUncacheIfNecessary(TFChunk chunk) {
    var number = chunk.ChunkHeader.ChunkStartNumber;
    var shouldBeCached = _cachingEnabled
                         && _chunksCount - number <= _config.CachedChunkCount
                         && ReferenceEquals(chunk, _chunks[number]);
    if (shouldBeCached)
        chunk.CacheInMemory();
    else
        chunk.UnCacheFromMemory();
}
// Opens a chunk file that is still being written to, positioned at writePosition.
// The partially constructed chunk is disposed if initialization fails.
public static TFChunk FromOngoingFile(string filename, int writePosition) {
    var chunk = new TFChunk(filename, TFConsts.TFChunkReaderCount, TFConsts.MidpointsDepth);
    try {
        chunk.InitOngoing(writePosition);
        return chunk;
    } catch {
        chunk.Dispose();
        throw;
    }
}
// Opens a completed (sealed) chunk file, optionally verifying its hash.
// The partially constructed chunk is disposed if initialization fails.
public static TFChunk FromCompletedFile(string filename, bool verifyHash) {
    var chunk = new TFChunk(filename, TFConsts.TFChunkReaderCount, TFConsts.MidpointsDepth);
    try {
        chunk.InitCompleted(verifyHash);
        return chunk;
    } catch {
        chunk.Dispose();
        throw;
    }
}
// Applies the caching policy to a chunk: ongoing chunks are (un)cached
// synchronously; completed (read-only) chunks are queued for the background
// caching worker. No-op when caching is disabled.
private void TryCacheChunk(TFChunk chunk) {
    if (!_cachingEnabled)
        return;
    if (chunk.IsReadOnly) {
        _chunksQueue.Enqueue(chunk);
        EnsureBackgroundWorkerRunning();
    } else {
        CacheUncacheIfNecessary(chunk);
    }
}
// Creates a brand new single-number chunk file (start == end == chunkNumber)
// with the current chunk format version. Disposes the chunk if init fails.
public static TFChunk CreateNew(string filename, int chunkSize, int chunkNumber, int chunkScavengeVersion) {
    var header = new ChunkHeader(CurrentChunkVersion, chunkSize, chunkNumber, chunkNumber, chunkScavengeVersion);
    var chunk = new TFChunk(filename, TFConsts.TFChunkReaderCount, TFConsts.MidpointsDepth);
    try {
        chunk.InitNew(header);
        return chunk;
    } catch {
        chunk.Dispose();
        throw;
    }
}
// Test setup: appends two prepare records to a fresh chunk and records each
// append's success flag and write position.
public void Setup() {
    _prepare1 = new PrepareLogRecord(0, _corrId, _eventId, 0, "test", 1,
                                     new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None,
                                     "Foo", new byte[12], new byte[15]);
    _prepare2 = new PrepareLogRecord(0, _corrId, _eventId, 0, "test2", 2,
                                     new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None,
                                     "Foo2", new byte[12], new byte[15]);
    _chunk = TFChunk.CreateNew(_filename, 4096, 0, 0);
    // OldPosition is the position at which the record was appended.
    var r1 = _chunk.TryAppend(_prepare1);
    _written1 = r1.Success;
    _position1 = r1.OldPosition;
    var r2 = _chunk.TryAppend(_prepare2);
    _written2 = r2.Success;
    _position2 = r2.OldPosition;
    _chunk.Flush();
}
// Copies one record into the scavenged chunk and returns the mapping from its
// logical in-chunk position to the actual position it landed at.
private static PosMap WriteRecord(TFChunk newChunk, LogRecord record) {
    var appendResult = newChunk.TryAppend(record);
    if (!appendResult.Success) {
        throw new Exception(string.Format(
            "Unable to append record during scavenging. Scavenge position: {0}, Record: {1}.",
            appendResult.OldPosition, record));
    }
    var logicalPos = (int)(record.Position % newChunk.ChunkHeader.ChunkSize);
    return new PosMap(logicalPos, (int)appendResult.OldPosition);
}
// Creates the next ongoing chunk from a prepared header (used during
// replication), after verifying it continues the sequence exactly at the
// current chunks count.
public TFChunk AddNewChunk(ChunkHeader chunkHeader, int fileSize) {
    Ensure.NotNull(chunkHeader, "chunkHeader");
    Ensure.Positive(fileSize, "fileSize");
    if (chunkHeader.ChunkStartNumber != _chunksCount) {
        throw new Exception(string.Format("Received request to create a new ongoing chunk {0}-{1}, but current chunks count is {2}.",
                                          chunkHeader.ChunkStartNumber, chunkHeader.ChunkEndNumber, _chunksCount));
    }
    var name = _config.FileNamingStrategy.GetFilenameFor(_chunksCount);
    var newChunk = TFChunk.CreateWithHeader(name, chunkHeader, fileSize);
    AddChunk(newChunk);
    return newChunk;
}
// Atomically replaces the chunk at the given number, uncaches the old chunk,
// applies the caching policy to the new one, and returns the old chunk.
// FIX (consistency): the caching logic was inlined here as an exact duplicate
// of TryCacheChunk -- delegate to the helper so the policy lives in one place.
// NOTE(review): oldChunk is dereferenced without a null check; confirm callers
// guarantee the slot is populated.
public TFChunk SwapChunk(int chunkNumber, TFChunk newChunk) {
    var oldChunk = Interlocked.Exchange(ref _chunks[chunkNumber], newChunk);
    oldChunk.UnCacheFromMemory();
    TryCacheChunk(newChunk);
    return oldChunk;
}
// Appends a record to the current chunk; on overflow seals the chunk, flushes
// the checkpoint, and rolls over to a fresh chunk -- the record is NOT written
// in that case, so the caller is expected to retry. Outputs the new writer
// position; returns append success.
public bool Write(LogRecord record, out long newPos) {
    var chunkNum = (int)(_writerPos / _db.Config.ChunkSize);
    var chunkPos = _writerPos % _db.Config.ChunkSize;
    var result = _writerChunk.TryAppend(record);
    if (result.Success) {
        Debug.Assert(result.OldPosition == chunkPos);
        _writerPos = chunkNum * (long)_db.Config.ChunkSize + result.NewPosition;
    } else {
        _writerChunk.Flush();
        _writerChunk.Complete();
        _writerCheckpoint.Flush(); //flush our checkpoint
        _writerChunk = _db.Manager.AddNewChunk();
        _writerPos = (chunkNum + 1) * (long)_db.Config.ChunkSize; // we just moved to a new chunk at pos 0
    }
    _writerCheckpoint.Write(_writerPos);
    newPos = _writerPos;
    return result.Success;
}
// Replaces the chunks spanned by newChunk in the map with newChunk. Returns
// false (changing nothing) when the range overlaps an existing merged chunk
// that extends outside it.
private bool ReplaceChunksWith(TFChunk.TFChunk newChunk, string chunkExplanation) {
    var chunkStartNumber = newChunk.ChunkHeader.ChunkStartNumber;
    var chunkEndNumber = newChunk.ChunkHeader.ChunkEndNumber;
    // Validation pass: every populated slot in the range must hold a chunk that
    // lies wholly inside [chunkStartNumber, chunkEndNumber].
    for (int i = chunkStartNumber; i <= chunkEndNumber;) {
        var chunk = _chunks[i];
        if (chunk != null) {
            var chunkHeader = chunk.ChunkHeader;
            if (chunkHeader.ChunkStartNumber < chunkStartNumber || chunkHeader.ChunkEndNumber > chunkEndNumber)
                return false;
            i = chunkHeader.ChunkEndNumber + 1;
        } else {
            //Cover the case of initial replication of merged chunks where they were never set
            // in the map in the first place.
            i = i + 1;
        }
    }
    // Replacement pass: mark each distinct old chunk for deletion exactly once
    // (a merged chunk occupies several consecutive slots).
    TFChunk.TFChunk lastRemovedChunk = null;
    for (int i = chunkStartNumber; i <= chunkEndNumber; i += 1) {
        var oldChunk = Interlocked.Exchange(ref _chunks[i], newChunk);
        if (oldChunk != null && !ReferenceEquals(lastRemovedChunk, oldChunk)) {
            oldChunk.MarkForDeletion();
            // NOTE(review): formats the chunk object itself (relies on its ToString);
            // a sibling version logs start/end numbers and filename instead -- confirm intent.
            Log.Info("{0} chunk #{1} is marked for deletion.", chunkExplanation, oldChunk);
        }
        lastRemovedChunk = oldChunk;
    }
    return true;
}
// Registers a (possibly merged) chunk under every chunk number it spans and
// applies the caching policy, all under the chunks lock.
public void AddChunk(TFChunk.TFChunk chunk) {
    Ensure.NotNull(chunk, "chunk");
    lock (_chunksLocker) {
        var header = chunk.ChunkHeader;
        for (int number = header.ChunkStartNumber; number <= header.ChunkEndNumber; ++number) {
            _chunks[number] = chunk;
        }
        _chunksCount = header.ChunkEndNumber + 1;
        TryCacheChunk(chunk);
    }
}
// Switches a completed chunk into the database: moves its file to the best
// versioned filename, re-opens it, publishes it under every chunk number it
// spans, and optionally removes all chunks with greater numbers (raw replication).
public TFChunk.TFChunk SwitchChunk(TFChunk.TFChunk chunk, bool verifyHash, bool replaceChunksWithGreaterNumbers) {
    Ensure.NotNull(chunk, "chunk");
    if (!chunk.IsReadOnly)
        throw new ArgumentException(string.Format("Passed TFChunk is not completed: {0}.", chunk.FileName));
    var chunkHeader = chunk.ChunkHeader;
    var oldFileName = chunk.FileName;
    Log.Info("Switching chunk #{0}-{1} ({2})...", chunkHeader.ChunkStartNumber, chunkHeader.ChunkEndNumber, oldFileName);
    // Release file handles so the file can be moved.
    chunk.Dispose();
    try {
        chunk.WaitForDestroy(0); // should happen immediately
    } catch (TimeoutException exc) {
        // NOTE(review): reads chunk.ChunkHeader/chunk.FileName after Dispose; a sibling
        // version uses the locals saved above -- confirm these properties remain valid
        // on a disposed chunk.
        throw new Exception(string.Format("The chunk that is being switched #{0}-{1} ({2}) is used by someone else.",
                                          chunk.ChunkHeader.ChunkStartNumber, chunk.ChunkHeader.ChunkEndNumber, chunk.FileName), exc);
    }
    var newFileName = _config.FileNamingStrategy.DetermineBestVersionFilenameFor(chunkHeader.ChunkStartNumber);
    Log.Info("File {0} will be moved to file {1}", oldFileName, newFileName);
    File.Move(oldFileName, newFileName);
    var newChunk = TFChunk.TFChunk.FromCompletedFile(newFileName, verifyHash);
    // Publish the new chunk under every number it covers, deleting whatever was there.
    for (int i = chunkHeader.ChunkStartNumber; i <= chunkHeader.ChunkEndNumber; ++i) {
        var oldChunk = Interlocked.Exchange(ref _chunks[i], newChunk);
        if (oldChunk != null) {
            oldChunk.MarkForDeletion();
            Log.Info("Old chunk {0} is marked for deletion.", oldChunk.FileName);
        }
    }
    if (replaceChunksWithGreaterNumbers) {
        var oldChunksCount = _chunksCount;
        _chunksCount = newChunk.ChunkHeader.ChunkEndNumber + 1;
        // Drop every chunk beyond the switched chunk's end.
        for (int i = chunkHeader.ChunkEndNumber + 1; i < oldChunksCount; ++i) {
            var oldChunk = Interlocked.Exchange(ref _chunks[i], null);
            if (oldChunk != null) {
                oldChunk.MarkForDeletion();
                Log.Info("Excessive chunk {0} is marked for deletion.", oldChunk.FileName);
            }
        }
        Debug.Assert(_chunks[_chunksCount] == null);
    }
    TryCacheChunk(newChunk);
    return newChunk;
}
// Opens a chunk file that is still being written to, positioned at writePosition.
// Disposes the partially constructed chunk if initialization fails.
public static TFChunk FromOngoingFile(string filename, int writePosition) {
    var chunk = new TFChunk(filename, TFConsts.TFChunkReaderCount, TFConsts.MidpointsDepth);
    try {
        chunk.InitOngoing(writePosition);
        return chunk;
    } catch {
        chunk.Dispose();
        throw;
    }
}
// Creates a brand new single-number chunk file (start == end == chunkNumber)
// with the current chunk format version. Disposes the chunk if init fails.
public static TFChunk CreateNew(string filename, int chunkSize, int chunkNumber, int chunkScavengeVersion) {
    var header = new ChunkHeader(CurrentChunkVersion, chunkSize, chunkNumber, chunkNumber, chunkScavengeVersion);
    var chunk = new TFChunk(filename, TFConsts.TFChunkReaderCount, TFConsts.MidpointsDepth);
    try {
        chunk.InitNew(header);
        return chunk;
    } catch {
        chunk.Dispose();
        throw;
    }
}
// Applies the caching policy to a chunk: ongoing chunks are (un)cached
// synchronously; completed (read-only) chunks are queued for the background
// caching worker. No-op when caching is disabled.
private void TryCacheChunk(TFChunk.TFChunk chunk) {
    if (!_cachingEnabled)
        return;
    if (chunk.IsReadOnly) {
        _chunksQueue.Enqueue(chunk);
        EnsureBackgroundWorkerRunning();
    } else {
        CacheUncacheIfNecessary(chunk);
    }
}
// Replaces the chunks spanned by newChunk in the map with newChunk. Returns
// false (changing nothing) when the range overlaps an existing merged chunk
// that extends outside it.
// FIX: the validation pass dereferenced _chunks[i] unconditionally and threw
// NullReferenceException for slots never populated (e.g. initial replication of
// merged chunks) -- a sibling version of this method guards exactly this case.
private bool ReplaceChunksWith(TFChunk.TFChunk newChunk, string chunkExplanation) {
    var chunkStartNumber = newChunk.ChunkHeader.ChunkStartNumber;
    var chunkEndNumber = newChunk.ChunkHeader.ChunkEndNumber;
    // Validation pass: every populated slot in the range must hold a chunk that
    // lies wholly inside [chunkStartNumber, chunkEndNumber].
    for (int i = chunkStartNumber; i <= chunkEndNumber;) {
        var chunk = _chunks[i];
        if (chunk == null) {
            // Slot was never set in the map -- just skip it.
            i += 1;
            continue;
        }
        var chunkHeader = chunk.ChunkHeader;
        if (chunkHeader.ChunkStartNumber < chunkStartNumber || chunkHeader.ChunkEndNumber > chunkEndNumber)
            return false;
        i = chunkHeader.ChunkEndNumber + 1;
    }
    // Replacement pass: mark each distinct old chunk for deletion exactly once
    // (a merged chunk occupies several consecutive slots).
    TFChunk.TFChunk lastRemovedChunk = null;
    for (int i = chunkStartNumber; i <= chunkEndNumber; i += 1) {
        var oldChunk = Interlocked.Exchange(ref _chunks[i], newChunk);
        if (oldChunk != null && !ReferenceEquals(lastRemovedChunk, oldChunk)) {
            oldChunk.MarkForDeletion();
            Log.Info("{0} chunk #{1}-{2} ({3}) is marked for deletion.", chunkExplanation,
                     oldChunk.ChunkHeader.ChunkStartNumber, oldChunk.ChunkHeader.ChunkEndNumber, oldChunk.FileName);
        }
        lastRemovedChunk = oldChunk;
    }
    return true;
}
// Creates a chunk under a temporary filename from a prepared header -- used as a
// staging target before the chunk is switched into the database.
public TFChunk CreateTempChunk(ChunkHeader chunkHeader, int fileSize) {
    var tempName = _config.FileNamingStrategy.GetTempFilename();
    return TFChunk.CreateWithHeader(tempName, chunkHeader, fileSize);
}
// Scavenges a single chunk: copies into a new temporary chunk every record that
// must be kept (commits always; prepares unless their stream is deleted, with
// delete tombstones always kept), then switches the scavenged chunk in.
private void ScavengeChunk(TFChunk oldChunk) {
    var sw = Stopwatch.StartNew();
    var chunkNumber = oldChunk.ChunkHeader.ChunkStartNumber;
    var newScavengeVersion = oldChunk.ChunkHeader.ChunkScavengeVersion + 1;
    var chunkSize = oldChunk.ChunkHeader.ChunkSize;
    var tmpChunkPath = Path.Combine(_db.Config.Path, Guid.NewGuid() + ".scavenge.tmp");
    var newChunkPath = _db.Config.FileNamingStrategy.GetFilenameFor(chunkNumber, newScavengeVersion);
    Log.Trace("Scavenging chunk #{0} ({1}) started. Scavenged chunk: {2} --> {3}.",
              chunkNumber, Path.GetFileName(oldChunk.FileName),
              Path.GetFileName(tmpChunkPath), Path.GetFileName(newChunkPath));
    TFChunk newChunk;
    try {
        newChunk = TFChunk.CreateNew(tmpChunkPath, chunkSize, chunkNumber, newScavengeVersion);
    } catch (IOException exc) {
        // Best-effort: failure to create the temp chunk aborts only this chunk's scavenge.
        Log.ErrorException(exc, "IOException during creating new chunk for scavenging purposes. Ignoring...");
        return;
    }
    var positionMapping = new List <PosMap>();
    // True once any kept record lands at a different position than its logical one.
    // NOTE(review): plain assignment below (not |=) -- presumably safe because once
    // one record shifts, all later kept records shift too; confirm.
    var positioningNeeded = false;
    var result = oldChunk.TryReadFirst();
    int cnt = 0; // records visited; not reported anywhere -- debugging aid?
    while (result.Success) {
        cnt += 1;
        var record = result.LogRecord;
        switch (record.RecordType) {
            case LogRecordType.Prepare: {
                var prepare = (PrepareLogRecord)record;
                if (!_readIndex.IsStreamDeleted(prepare.EventStreamId)
                    || (prepare.Flags & PrepareFlags.StreamDelete) != 0) // delete tombstone should be left
                {
                    var posMap = WriteRecord(newChunk, record);
                    positionMapping.Add(posMap);
                    positioningNeeded = posMap.LogPos != posMap.ActualPos;
                }
                break;
            }
            case LogRecordType.Commit: {
                var posMap = WriteRecord(newChunk, record);
                positionMapping.Add(posMap);
                positioningNeeded = posMap.LogPos != posMap.ActualPos;
                break;
            }
            default:
                throw new ArgumentOutOfRangeException();
        }
        if (result.NextPosition == -1) {
            break;
        }
        result = oldChunk.TryReadSameOrClosest((int)result.NextPosition);
    }
    var oldSize = oldChunk.ChunkFooter.ActualChunkSize + oldChunk.ChunkFooter.MapSize + ChunkHeader.Size + ChunkFooter.Size;
    var newSize = newChunk.ActualDataSize
                  + (positioningNeeded ? sizeof(ulong) * positionMapping.Count : 0)
                  + ChunkHeader.Size + ChunkFooter.Size;
    // NOTE(review): 'false &&' makes this branch dead code, so the scavenged chunk
    // is ALWAYS switched in even when it is larger than the original. Presumably a
    // temporary override -- confirm whether the size check should be re-enabled.
    if (false && oldSize <= newSize) {
        Log.Trace("Scavenging of chunk #{0} ({1}) completed in {2}.\n"
                  + "Old version is kept as it is smaller.\n"
                  + "Old chunk size: {3}, scavenged size: {4}.\n"
                  + "Scavenged chunk removed.",
                  chunkNumber, oldChunk.FileName, sw.Elapsed, oldSize, newSize);
        newChunk.Dispose();
        File.Delete(newChunk.FileName);
    } else {
        newChunk.CompleteScavenge(positioningNeeded ? positionMapping : null);
        newChunk.Dispose();
        File.Move(tmpChunkPath, newChunkPath);
        newChunk = TFChunk.FromCompletedFile(newChunkPath);
        var removedChunk = _db.Manager.SwapChunk(chunkNumber, newChunk);
        Debug.Assert(ReferenceEquals(removedChunk, oldChunk)); // only scavenging could switch, so old should be always same
        oldChunk.MarkForDeletion();
        Log.Trace("Scavenging of chunk #{0} ({1}) completed in {2} into ({3} --> {4}).\n"
                  + "Old size: {5}, new size: {6}, new scavenge version: {7}.",
                  chunkNumber, Path.GetFileName(oldChunk.FileName), sw.Elapsed,
                  Path.GetFileName(tmpChunkPath), Path.GetFileName(newChunkPath),
                  oldSize, newSize, newScavengeVersion);
    }
}
// Loads a completed (sealed) chunk from disk.
private TFChunk LoadChunk(string chunkFileName) {
    return TFChunk.FromCompletedFile(chunkFileName);
}
// Copies one record into the scavenged chunk and returns the mapping from its
// logical in-chunk position to the actual position it landed at.
private static PosMap WriteRecord(TFChunk newChunk, LogRecord record) {
    var appendResult = newChunk.TryAppend(record);
    if (!appendResult.Success) {
        throw new Exception(string.Format(
            "Unable to append record during scavenging. Scavenge position: {0}, Record: {1}.",
            appendResult.OldPosition, record));
    }
    var logicalPos = (int)(record.Position % newChunk.ChunkHeader.ChunkSize);
    var landedPos = (int)appendResult.OldPosition;
    return new PosMap(logicalPos, landedPos);
}
// Creates a bulk reader over the given chunk.
// NOTE(review): unlike the stream-taking overload, there is no null check here
// and _stream is left unassigned -- confirm this is intentional.
internal TFChunkBulkReader(TFChunk chunk) { _chunk = chunk; }
// Scavenges a single chunk: copies into a new temporary chunk every record that
// must be kept (commits always; prepares unless their stream is deleted, with
// delete tombstones always kept), then switches the scavenged chunk in.
private void ScavengeChunk(TFChunk oldChunk) {
    var sw = Stopwatch.StartNew();
    var chunkNumber = oldChunk.ChunkHeader.ChunkStartNumber;
    var newScavengeVersion = oldChunk.ChunkHeader.ChunkScavengeVersion + 1;
    var chunkSize = oldChunk.ChunkHeader.ChunkSize;
    var tmpChunkPath = Path.Combine(_db.Config.Path, Guid.NewGuid() + ".scavenge.tmp");
    var newChunkPath = _db.Config.FileNamingStrategy.GetFilenameFor(chunkNumber, newScavengeVersion);
    Log.Trace("Scavenging chunk #{0} ({1}) started. Scavenged chunk: {2} --> {3}.",
              chunkNumber, Path.GetFileName(oldChunk.FileName),
              Path.GetFileName(tmpChunkPath), Path.GetFileName(newChunkPath));
    TFChunk newChunk;
    try {
        newChunk = TFChunk.CreateNew(tmpChunkPath, chunkSize, chunkNumber, newScavengeVersion);
    } catch (IOException exc) {
        // Best-effort: failure to create the temp chunk aborts only this chunk's scavenge.
        Log.ErrorException(exc, "IOException during creating new chunk for scavenging purposes. Ignoring...");
        return;
    }
    var positionMapping = new List<PosMap>();
    // True once any kept record lands at a different position than its logical one.
    // NOTE(review): plain assignment below (not |=) -- presumably safe because once
    // one record shifts, all later kept records shift too; confirm.
    var positioningNeeded = false;
    var result = oldChunk.TryReadFirst();
    int cnt = 0; // records visited; not reported anywhere -- debugging aid?
    while (result.Success) {
        cnt += 1;
        var record = result.LogRecord;
        switch (record.RecordType) {
            case LogRecordType.Prepare: {
                var prepare = (PrepareLogRecord) record;
                if (!_readIndex.IsStreamDeleted(prepare.EventStreamId)
                    || (prepare.Flags & PrepareFlags.StreamDelete) != 0) // delete tombstone should be left
                {
                    var posMap = WriteRecord(newChunk, record);
                    positionMapping.Add(posMap);
                    positioningNeeded = posMap.LogPos != posMap.ActualPos;
                }
                break;
            }
            case LogRecordType.Commit: {
                var posMap = WriteRecord(newChunk, record);
                positionMapping.Add(posMap);
                positioningNeeded = posMap.LogPos != posMap.ActualPos;
                break;
            }
            default:
                throw new ArgumentOutOfRangeException();
        }
        if (result.NextPosition == -1)
            break;
        result = oldChunk.TryReadSameOrClosest((int)result.NextPosition);
    }
    var oldSize = oldChunk.ChunkFooter.ActualChunkSize + oldChunk.ChunkFooter.MapSize + ChunkHeader.Size + ChunkFooter.Size;
    var newSize = newChunk.ActualDataSize
                  + (positioningNeeded ? sizeof(ulong) * positionMapping.Count : 0)
                  + ChunkHeader.Size + ChunkFooter.Size;
    // NOTE(review): 'false &&' makes this branch dead code, so the scavenged chunk
    // is ALWAYS switched in even when it is larger than the original. Presumably a
    // temporary override -- confirm whether the size check should be re-enabled.
    if (false && oldSize <= newSize) {
        Log.Trace("Scavenging of chunk #{0} ({1}) completed in {2}.\n"
                  + "Old version is kept as it is smaller.\n"
                  + "Old chunk size: {3}, scavenged size: {4}.\n"
                  + "Scavenged chunk removed.",
                  chunkNumber, oldChunk.FileName, sw.Elapsed, oldSize, newSize);
        newChunk.Dispose();
        File.Delete(newChunk.FileName);
    } else {
        newChunk.CompleteScavenge(positioningNeeded ? positionMapping : null);
        newChunk.Dispose();
        File.Move(tmpChunkPath, newChunkPath);
        newChunk = TFChunk.FromCompletedFile(newChunkPath);
        var removedChunk = _db.Manager.SwapChunk(chunkNumber, newChunk);
        Debug.Assert(ReferenceEquals(removedChunk, oldChunk)); // only scavenging could switch, so old should be always same
        oldChunk.MarkForDeletion();
        Log.Trace("Scavenging of chunk #{0} ({1}) completed in {2} into ({3} --> {4}).\n"
                  + "Old size: {5}, new size: {6}, new scavenge version: {7}.",
                  chunkNumber, Path.GetFileName(oldChunk.FileName), sw.Elapsed,
                  Path.GetFileName(tmpChunkPath), Path.GetFileName(newChunkPath),
                  oldSize, newSize, newScavengeVersion);
    }
}
// Opens a completed (sealed) chunk file, optionally verifying its hash.
// Disposes the partially constructed chunk if initialization fails.
public static TFChunk FromCompletedFile(string filename, bool verifyHash) {
    var chunk = new TFChunk(filename, TFConsts.TFChunkReaderCount, TFConsts.MidpointsDepth);
    try {
        chunk.InitCompleted(verifyHash);
        return chunk;
    } catch {
        chunk.Dispose();
        throw;
    }
}
// Test setup: completes an empty 4096-byte chunk and re-opens it as a completed file.
public void Setup() {
    _chunk = TFChunk.CreateNew(filename, 4096, 0, 0);
    _chunk.Complete();
    _testChunk = TFChunk.FromCompletedFile(filename);
}
// Test setup: completes an empty 4096-byte chunk and re-opens it as a completed
// file with hash verification enabled.
public void Setup() {
    _chunk = TFChunk.CreateNew(_filename, 4096, 0, 0);
    _chunk.Complete();
    _testChunk = TFChunk.FromCompletedFile(_filename, verifyHash: true);
}
// Test setup: marks the chunk for deletion while a reader is still acquired;
// the reader is deliberately NOT released here, so deletion must be deferred.
public void setup() {
    _chunk = TFChunk.CreateNew(_filename, 1000, 0, 0);
    _reader = _chunk.AcquireReader();
    _chunk.MarkForDeletion();
}
// Fixture setup: builds a db with 16KB chunks, appends three records (two to a
// stream the fake read index reports as deleted), completes the chunk, and runs
// a scavenge with alwaysKeepScavenged: true, capturing the scavenged chunk.
public override void TestFixtureSetUp() {
    base.TestFixtureSetUp();
    _db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                            new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                            16 * 1024, 0, new InMemoryCheckpoint(), new ICheckpoint[0]));
    _db.OpenVerifyAndClean();
    var chunk = _db.Manager.GetChunk(0);
    _rec1 = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "es1", ExpectedVersion.Any, "et1", new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
    _res1 = chunk.TryAppend(_rec1);
    // Each subsequent record is positioned at the previous append's NewPosition.
    _rec2 = LogRecord.SingleWrite(_res1.NewPosition, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1", new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
    _res2 = chunk.TryAppend(_rec2);
    _rec3 = LogRecord.SingleWrite(_res2.NewPosition, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1", new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
    _res3 = chunk.TryAppend(_rec3);
    chunk.Complete();
    // The fake read index treats exactly the stream "es-to-scavenge" as deleted.
    var scavenger = new TFChunkScavenger(_db, new FakeReadIndex(x => x == "es-to-scavenge"));
    scavenger.Scavenge(alwaysKeepScavenged: true);
    _scavengedChunk = _db.Manager.GetChunk(0);
}
// Locked variant of chunk switching: renames and re-opens the completed chunk,
// then under the chunks lock replaces the spanned range (or marks the new chunk
// for deletion if the range conflicts) and optionally removes chunks with
// greater numbers. Returns the re-opened chunk.
public TFChunk.TFChunk SwitchChunk(TFChunk.TFChunk chunk, bool verifyHash, bool removeChunksWithGreaterNumbers) {
    Ensure.NotNull(chunk, "chunk");
    if (!chunk.IsReadOnly)
        throw new ArgumentException(string.Format("Passed TFChunk is not completed: {0}.", chunk.FileName));
    var chunkHeader = chunk.ChunkHeader;
    var oldFileName = chunk.FileName;
    Log.Info("Switching chunk #{0}-{1} ({2})...", chunkHeader.ChunkStartNumber, chunkHeader.ChunkEndNumber, oldFileName);
    // Release file handles so the file can be moved.
    chunk.Dispose();
    try {
        chunk.WaitForDestroy(0); // should happen immediately
    } catch (TimeoutException exc) {
        throw new Exception(string.Format("The chunk that is being switched #{0}-{1} ({2}) is used by someone else.",
                                          chunkHeader.ChunkStartNumber, chunkHeader.ChunkEndNumber, oldFileName), exc);
    }
    var newFileName = _config.FileNamingStrategy.DetermineBestVersionFilenameFor(chunkHeader.ChunkStartNumber);
    Log.Info("File {0} will be moved to file {1}", oldFileName, newFileName);
    File.Move(oldFileName, newFileName);
    var newChunk = TFChunk.TFChunk.FromCompletedFile(newFileName, verifyHash);
    lock (_chunksLocker) {
        if (!ReplaceChunksWith(newChunk, "Old")) {
            // Range conflicts with an existing merged chunk -- don't switch; delete instead.
            Log.Info("Chunk #{0}-{1} ({2}) will be not switched, marking for remove...",
                     chunkHeader.ChunkStartNumber, chunkHeader.ChunkEndNumber,newFileName);
            newChunk.MarkForDeletion();
        }
        if (removeChunksWithGreaterNumbers) {
            var oldChunksCount = _chunksCount;
            _chunksCount = newChunk.ChunkHeader.ChunkEndNumber + 1;
            RemoveChunks(chunkHeader.ChunkEndNumber + 1, oldChunksCount-1, "Excessive");
            if (_chunks[_chunksCount] != null)
                throw new Exception(string.Format("Excessive chunk #{0} found after raw replication switch.", _chunksCount));
        }
        TryCacheChunk(newChunk);
        return newChunk;
    }
}
// Caching policy variant driven by a background process and a total cache-size
// budget: bumps the pending-pass counter, starts the background caching process
// if it is not already running, and synchronously caches an ongoing chunk when
// one full chunk (data + header + footer) fits within MaxChunksCacheSize.
private void TryCacheChunk(TFChunk.TFChunk chunk) {
    if (!_cachingEnabled)
        return;
    // Signal the background process that another caching pass is needed.
    Interlocked.Increment(ref _backgroundPassesRemaining);
    // Start the background process only if it is not already running (0 -> 1 transition).
    if (Interlocked.CompareExchange(ref _backgroundRunning, 1, 0) == 0)
        ThreadPool.QueueUserWorkItem(BackgroundCachingProcess);
    // Ongoing (writable) chunks are cached inline; completed chunks are presumably
    // handled by the background process -- confirm.
    if (!chunk.IsReadOnly && chunk.ChunkHeader.ChunkSize + ChunkHeader.Size + ChunkFooter.Size <= _config.MaxChunksCacheSize)
        chunk.CacheInMemory();
}
// Loads a completed (sealed) chunk from disk, optionally verifying its hash.
private TFChunk LoadChunk(string chunkFileName, bool verifyHash) {
    return TFChunk.FromCompletedFile(chunkFileName, verifyHash);
}
// Test setup: marks a freshly created chunk for deletion with no readers held.
public void Setup() {
    _chunk = TFChunk.CreateNew(_filename, 1000, 0, 0);
    _chunk.MarkForDeletion();
}
// Registers a newly created chunk at the next chunk number and applies the
// caching policy: the chunk falling out of the cached window is queued for
// uncaching, and the new chunk is cached inline (ongoing) or queued (completed).
// FIX: removed the redundant nested 'if (_cachingEnabled)' -- it re-tested the
// condition already guaranteed by the enclosing block.
public void AddChunk(TFChunk.TFChunk chunk) {
    Ensure.NotNull(chunk, "chunk");
    _chunks[_chunksCount] = chunk;
    _chunksCount += 1;
    if (_cachingEnabled) {
        // Chunk that just left the window of CachedChunkCount most recent chunks.
        int uncacheIndex = _chunksCount - _config.CachedChunkCount - 1;
        if (uncacheIndex >= 0) {
            _chunksQueue.Enqueue(_chunks[uncacheIndex]);
            EnsureBackgroundWorkerRunning();
        }
        // Ongoing chunks are (un)cached synchronously; completed (read-only)
        // chunks go through the background worker.
        if (!chunk.IsReadOnly)
            CacheUncacheIfNecessary(chunk);
        else {
            _chunksQueue.Enqueue(chunk);
            EnsureBackgroundWorkerRunning();
        }
    }
}