internal TFChunkBulkReader(TFChunk.TFChunk chunk, Stream streamToUse) {
    Ensure.NotNull(chunk, "chunk");
    Ensure.NotNull(streamToUse, "stream");
    _chunk = chunk;
    _stream = streamToUse;
}
private bool ReplaceChunksWith(TFChunk.TFChunk newChunk, string chunkExplanation) {
    var chunkStartNumber = newChunk.ChunkHeader.ChunkStartNumber;
    var chunkEndNumber = newChunk.ChunkHeader.ChunkEndNumber;
    for (int i = chunkStartNumber; i <= chunkEndNumber;) {
        var chunkHeader = _chunks[i].ChunkHeader;
        if (chunkHeader.ChunkStartNumber < chunkStartNumber || chunkHeader.ChunkEndNumber > chunkEndNumber)
            return false;
        i = chunkHeader.ChunkEndNumber + 1;
    }

    TFChunk.TFChunk lastRemovedChunk = null;
    for (int i = chunkStartNumber; i <= chunkEndNumber; i += 1) {
        var oldChunk = Interlocked.Exchange(ref _chunks[i], newChunk);
        if (oldChunk != null && !ReferenceEquals(lastRemovedChunk, oldChunk)) {
            oldChunk.MarkForDeletion();
            Log.Info("{0} chunk #{1} is marked for deletion.", chunkExplanation, oldChunk);
        }
        lastRemovedChunk = oldChunk;
    }
    return true;
}
public TFChunkReadSideUnscavenged(TFChunk chunk) : base(chunk) {
    if (chunk.ChunkHeader.IsScavenged)
        throw new ArgumentException("Scavenged TFChunk passed into unscavenged chunk read side.");
}
public override void TestFixtureSetUp() {
    base.TestFixtureSetUp();
    _chunk = TFChunk.CreateNew(Filename, 4096, 0, 0, false);
    _chunk.Complete();
    _testChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true);
}
private void TraverseChunk(TFChunk.TFChunk chunk,
                           Action<PrepareLogRecord, int> processPrepare,
                           Action<CommitLogRecord, int> processCommit,
                           Action<SystemLogRecord, int> processSystem) {
    var result = chunk.TryReadFirst();
    while (result.Success) {
        var record = result.LogRecord;
        switch (record.RecordType) {
            case LogRecordType.Prepare: {
                var prepare = (PrepareLogRecord)record;
                processPrepare(prepare, result.RecordLength);
                break;
            }
            case LogRecordType.Commit: {
                var commit = (CommitLogRecord)record;
                processCommit(commit, result.RecordLength);
                break;
            }
            case LogRecordType.System: {
                var system = (SystemLogRecord)record;
                processSystem(system, result.RecordLength);
                break;
            }
            default:
                throw new ArgumentOutOfRangeException();
        }
        result = chunk.TryReadClosestForward(result.NextPosition);
    }
}
public override void TestFixtureSetUp() {
    base.TestFixtureSetUp();
    _chunk = TFChunk.CreateNew(Filename, 4096, 0, 0, isScavenged: true);
    _chunk.CompleteScavenge(new PosMap[0]);
    _chunk.CacheInMemory();
}
public static TFChunk CreateWithHeader(string filename, ChunkHeader header, int fileSize, bool inMem, bool unbuffered, bool writethrough) {
    var chunk = new TFChunk(filename, ESConsts.TFChunkInitialReaderCount, ESConsts.TFChunkMaxReaderCount,
                            TFConsts.MidpointsDepth, inMem, unbuffered, writethrough);
    try {
        chunk.InitNew(header, fileSize);
    } catch {
        chunk.Dispose();
        throw;
    }
    return chunk;
}
public TFChunk.TFChunk SwitchChunk(TFChunk.TFChunk chunk, bool verifyHash, bool removeChunksWithGreaterNumbers) {
    Ensure.NotNull(chunk, "chunk");
    if (!chunk.IsReadOnly)
        throw new ArgumentException(string.Format("Passed TFChunk is not completed: {0}.", chunk.FileName));

    var chunkHeader = chunk.ChunkHeader;
    var oldFileName = chunk.FileName;

    Log.Info("Switching chunk #{0}-{1} ({2})...",
             chunkHeader.ChunkStartNumber, chunkHeader.ChunkEndNumber, Path.GetFileName(oldFileName));

    TFChunk.TFChunk newChunk;
    if (_config.InMemDb) {
        newChunk = chunk;
    } else {
        chunk.Dispose();
        try {
            chunk.WaitForDestroy(0); // should happen immediately
        } catch (TimeoutException exc) {
            throw new Exception(
                string.Format("The chunk that is being switched {0} is used by someone else.", chunk), exc);
        }

        var newFileName = _config.FileNamingStrategy.DetermineBestVersionFilenameFor(chunkHeader.ChunkStartNumber);
        Log.Info("File {0} will be moved to file {1}", Path.GetFileName(oldFileName), Path.GetFileName(newFileName));
        File.Move(oldFileName, newFileName);
        newChunk = TFChunk.TFChunk.FromCompletedFile(newFileName, verifyHash, _config.Unbuffered);
    }

    lock (_chunksLocker) {
        if (!ReplaceChunksWith(newChunk, "Old")) {
            Log.Info("Chunk {0} will not be switched, marking for remove...", newChunk);
            newChunk.MarkForDeletion();
        }

        if (removeChunksWithGreaterNumbers) {
            var oldChunksCount = _chunksCount;
            _chunksCount = newChunk.ChunkHeader.ChunkEndNumber + 1;
            RemoveChunks(chunkHeader.ChunkEndNumber + 1, oldChunksCount - 1, "Excessive");
            if (_chunks[_chunksCount] != null)
                throw new Exception(string.Format("Excessive chunk #{0} found after raw replication switch.", _chunksCount));
        }

        TryCacheChunk(newChunk);
        return newChunk;
    }
}
public override void TestFixtureSetUp() {
    base.TestFixtureSetUp();
    _record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                   PrepareFlags.None, "Foo", new byte[12], new byte[15]);
    _chunk = TFChunk.CreateNew(Filename, 4096, 0, 0, false);
    _result = _chunk.TryAppend(_record);
}
public void Optimize(TFChunk.TFChunk chunk) {
    if (!chunk.ChunkHeader.IsScavenged)
        return;
    _cache.Put(chunk.FileName, chunk);
}
public override void SetUp() {
    base.SetUp();
    _chunk = TFChunk.CreateNew(Filename, 1000, 0, 0, false);
    var reader = _chunk.AcquireReader();
    _chunk.MarkForDeletion();
    reader.Release();
}
public override void SetUp() {
    base.SetUp();
    var record = new PrepareLogRecord(15556, _corrId, _eventId, 15556, 0, "test", 1,
                                      new DateTime(2000, 1, 1, 12, 0, 0), PrepareFlags.None,
                                      "Foo", new byte[12], new byte[15]);
    _chunk = TFChunk.CreateNew(Filename, 20, 0, 0, false);
    _written = _chunk.TryAppend(record).Success;
}
public TFChunkReadSideScavenged(TFChunk chunk) : base(chunk) {
    if (!chunk.ChunkHeader.IsScavenged)
        throw new ArgumentException(string.Format("Chunk provided is not scavenged: {0}", chunk));
}
public TFChunkWriter(TFChunkDb db) {
    Ensure.NotNull(db, "db");
    _db = db;
    _writerCheckpoint = db.Config.WriterCheckpoint;
    _currentChunk = db.Manager.GetChunkFor(_writerCheckpoint.Read());
    if (_currentChunk == null)
        throw new InvalidOperationException("No chunk given for existing position.");
}
public void CompleteChunk() {
    var chunk = _currentChunk;
    _currentChunk = null; // in case creation of new chunk fails, we shouldn't use completed chunk for write

    chunk.Complete();
    _writerCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
    _writerCheckpoint.Flush();

    _currentChunk = _db.Manager.AddNewChunk();
}
public void CompleteReplicatedRawChunk(TFChunk.TFChunk rawChunk) {
    _currentChunk = null; // in case creation of new chunk fails, we shouldn't use completed chunk for write

    rawChunk.CompleteRaw();
    _db.Manager.SwitchChunk(rawChunk, verifyHash: true, removeChunksWithGreaterNumbers: true);

    _writerCheckpoint.Write(rawChunk.ChunkHeader.ChunkEndPosition);
    _writerCheckpoint.Flush();

    _currentChunk = _db.Manager.AddNewChunk();
}
public override void TestFixtureSetUp() {
    base.TestFixtureSetUp();
    _record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                   PrepareFlags.None, "Foo", new byte[12], new byte[15]);
    _chunk = TFChunk.CreateNew(Filename, 4096, 0, 0, false);
    _result = _chunk.TryAppend(_record);
    _chunk.Flush();
    _chunk.Complete();
    _cachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true);
    _cachedChunk.CacheInMemory();
}
private bool ReplaceChunksWith(TFChunk.TFChunk newChunk, string chunkExplanation) {
    var chunkStartNumber = newChunk.ChunkHeader.ChunkStartNumber;
    var chunkEndNumber = newChunk.ChunkHeader.ChunkEndNumber;
    for (int i = chunkStartNumber; i <= chunkEndNumber;) {
        var chunk = _chunks[i];
        if (chunk != null) {
            var chunkHeader = chunk.ChunkHeader;
            if (chunkHeader.ChunkStartNumber < chunkStartNumber || chunkHeader.ChunkEndNumber > chunkEndNumber)
                return false;
            i = chunkHeader.ChunkEndNumber + 1;
        } else {
            // Cover the case of initial replication of merged chunks where they were never set
            // in the map in the first place.
            i = i + 1;
        }
    }

    TFChunk.TFChunk previousRemovedChunk = null;
    for (int i = chunkStartNumber; i <= chunkEndNumber; i += 1) {
        var oldChunk = Interlocked.Exchange(ref _chunks[i], newChunk);
        if (!ReferenceEquals(previousRemovedChunk, oldChunk)) {
            // Once we've swapped all entries for the previousRemovedChunk we can safely delete it.
            if (previousRemovedChunk != null) {
                previousRemovedChunk.MarkForDeletion();
                Log.Info("{chunkExplanation} chunk #{oldChunk} is marked for deletion.", chunkExplanation, previousRemovedChunk);
            }
            previousRemovedChunk = oldChunk;
        }
    }

    if (previousRemovedChunk != null) {
        // Delete the last chunk swapped out now it's fully replaced.
        previousRemovedChunk.MarkForDeletion();
        Log.Info("{chunkExplanation} chunk #{oldChunk} is marked for deletion.", chunkExplanation, previousRemovedChunk);
    }

    return true;
}
public TFChunkWriter(TFChunkDb db) {
    Ensure.NotNull(db, "db");
    _db = db;
    _writerCheckpoint = db.Config.WriterCheckpoint;
    _currentChunk = db.Manager.GetChunkFor(_writerCheckpoint.Read());
    if (_currentChunk == null) {
        throw new InvalidOperationException("No chunk given for existing position.");
    }
}
private void RemoveChunks(int chunkStartNumber, int chunkEndNumber, string chunkExplanation) {
    TFChunk.TFChunk lastRemovedChunk = null;
    for (int i = chunkStartNumber; i <= chunkEndNumber; i += 1) {
        var oldChunk = Interlocked.Exchange(ref _chunks[i], null);
        if (oldChunk != null && !ReferenceEquals(lastRemovedChunk, oldChunk)) {
            oldChunk.MarkForDeletion();
            Log.Info("{0} chunk {1} is marked for deletion.", chunkExplanation, oldChunk);
        }
        lastRemovedChunk = oldChunk;
    }
}
private static PosMap WriteRecord(TFChunk.TFChunk newChunk, LogRecord record) {
    var writeResult = newChunk.TryAppend(record);
    if (!writeResult.Success) {
        throw new Exception(string.Format(
            "Unable to append record during scavenging. Scavenge position: {0}, Record: {1}.",
            writeResult.OldPosition, record));
    }
    long logPos = newChunk.ChunkHeader.GetLocalLogPosition(record.LogPosition);
    int actualPos = (int)writeResult.OldPosition;
    return new PosMap(logPos, actualPos);
}
private void TraverseChunkBasic(TFChunk.TFChunk chunk, CancellationToken ct, Action<CandidateRecord> process) {
    var result = chunk.TryReadFirst();
    while (result.Success) {
        process(new CandidateRecord(result.LogRecord, result.RecordLength));

        ct.ThrowIfCancellationRequested();

        result = chunk.TryReadClosestForward(result.NextPosition);
    }
}
public static TFChunk FromCompletedFile(string filename, bool verifyHash) {
    var chunk = new TFChunk(filename, ESConsts.TFChunkInitialReaderCount, ESConsts.TFChunkMaxReaderCount,
                            TFConsts.MidpointsDepth);
    try {
        chunk.InitCompleted(verifyHash);
    } catch {
        chunk.Dispose();
        throw;
    }
    return chunk;
}
public void AddChunk(TFChunk.TFChunk chunk) {
    Ensure.NotNull(chunk, "chunk");
    lock (_chunksLocker) {
        for (int i = chunk.ChunkHeader.ChunkStartNumber; i <= chunk.ChunkHeader.ChunkEndNumber; ++i) {
            _chunks[i] = chunk;
        }
        _chunksCount = chunk.ChunkHeader.ChunkEndNumber + 1;

        TryCacheChunk(chunk);
    }
}
public static TFChunk FromOngoingFile(string filename, int writePosition, bool checkSize) {
    var chunk = new TFChunk(filename, ESConsts.TFChunkInitialReaderCount, ESConsts.TFChunkMaxReaderCount,
                            TFConsts.MidpointsDepth);
    try {
        chunk.InitOngoing(writePosition, checkSize);
    } catch {
        chunk.Dispose();
        throw;
    }
    return chunk;
}
public static TFChunk FromCompletedFile(string filename, bool verifyHash, bool unbufferedRead,
                                        int initialReaderCount, int maxReaderCount,
                                        bool optimizeReadSideCache = false, bool reduceFileCachePressure = false) {
    var chunk = new TFChunk(filename, initialReaderCount, maxReaderCount, TFConsts.MidpointsDepth,
                            false, unbufferedRead, false, reduceFileCachePressure);
    try {
        chunk.InitCompleted(verifyHash, optimizeReadSideCache);
    } catch {
        chunk.Dispose();
        throw;
    }
    return chunk;
}
public override void TestFixtureSetUp() {
    base.TestFixtureSetUp();
    _prepare1 = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                     PrepareFlags.None, "Foo", new byte[12], new byte[15]);
    _prepare2 = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test2", 2, new DateTime(2000, 1, 1, 12, 0, 0),
                                     PrepareFlags.None, "Foo2", new byte[12], new byte[15]);
    _chunk = TFChunk.CreateNew(Filename, 4096, 0, 0, false);
    var r1 = _chunk.TryAppend(_prepare1);
    _written1 = r1.Success;
    _position1 = r1.OldPosition;
    var r2 = _chunk.TryAppend(_prepare2);
    _written2 = r2.Success;
    _position2 = r2.OldPosition;
    _chunk.Flush();
}
private void TryCacheChunk(TFChunk.TFChunk chunk) {
    if (!_cachingEnabled)
        return;

    Interlocked.Increment(ref _backgroundPassesRemaining);
    if (Interlocked.CompareExchange(ref _backgroundRunning, 1, 0) == 0) {
        ThreadPool.QueueUserWorkItem(BackgroundCachingProcess);
    }

    if (!chunk.IsReadOnly && chunk.ChunkHeader.ChunkSize + ChunkHeader.Size + ChunkFooter.Size <= _config.MaxChunksCacheSize) {
        chunk.CacheInMemory();
    }
}
private bool ReplaceChunksWith(TFChunk.TFChunk newChunk, string chunkExplanation) {
    var chunkStartNumber = newChunk.ChunkHeader.ChunkStartNumber;
    var chunkEndNumber = newChunk.ChunkHeader.ChunkEndNumber;
    for (int i = chunkStartNumber; i <= chunkEndNumber;) {
        var chunk = _chunks[i];
        if (chunk != null) {
            var chunkHeader = chunk.ChunkHeader;
            if (chunkHeader.ChunkStartNumber < chunkStartNumber || chunkHeader.ChunkEndNumber > chunkEndNumber)
                return false;
            i = chunkHeader.ChunkEndNumber + 1;
        } else {
            // Cover the case of initial replication of merged chunks where they were never set
            // in the map in the first place.
            i = i + 1;
        }
    }

    TFChunk.TFChunk lastRemovedChunk = null;
    for (int i = chunkStartNumber; i <= chunkEndNumber; i += 1) {
        var oldChunk = Interlocked.Exchange(ref _chunks[i], newChunk);
        if (oldChunk != null && !ReferenceEquals(lastRemovedChunk, oldChunk)) {
            oldChunk.MarkForDeletion();
            Log.Info("{chunkExplanation} chunk #{oldChunk} is marked for deletion.", chunkExplanation, oldChunk);
        }
        lastRemovedChunk = oldChunk;
    }
    return true;
}
public static TFChunk FromOngoingFile(string filename, int writePosition, bool checkSize, bool unbuffered,
                                      bool writethrough, int initialReaderCount, int maxReaderCount,
                                      bool reduceFileCachePressure) {
    var chunk = new TFChunk(filename, initialReaderCount, maxReaderCount, TFConsts.MidpointsDepth,
                            false, unbuffered, writethrough, reduceFileCachePressure);
    try {
        chunk.InitOngoing(writePosition, checkSize);
    } catch {
        chunk.Dispose();
        throw;
    }
    return chunk;
}
public static TFChunk CreateWithHeader(string filename, ChunkHeader header, int fileSize, bool inMem) {
    var chunk = new TFChunk(filename, ESConsts.TFChunkInitialReaderCount, ESConsts.TFChunkMaxReaderCount,
                            TFConsts.MidpointsDepth, inMem);
    try {
        chunk.InitNew(header, fileSize);
    } catch {
        chunk.Dispose();
        throw;
    }
    return chunk;
}
public static TFChunk FromCompletedFile(string filename, bool verifyHash) {
    var chunk = new TFChunk(filename, ESConsts.TFChunkInitialReaderCount, ESConsts.TFChunkMaxReaderCount,
                            TFConsts.MidpointsDepth, false);
    try {
        chunk.InitCompleted(verifyHash);
    } catch {
        chunk.Dispose();
        throw;
    }
    return chunk;
}
public static TFChunk FromOngoingFile(string filename, int writePosition, bool checkSize) {
    var chunk = new TFChunk(filename, ESConsts.TFChunkInitialReaderCount, ESConsts.TFChunkMaxReaderCount,
                            TFConsts.MidpointsDepth, false);
    try {
        chunk.InitOngoing(writePosition, checkSize);
    } catch {
        chunk.Dispose();
        throw;
    }
    return chunk;
}
public TFChunkReadSideScavenged(TFChunk chunk) : base(chunk) {
    if (!chunk.ChunkHeader.IsScavenged)
        throw new ArgumentException(string.Format("Chunk provided is not scavenged: {0}", chunk));
}
public TFChunkReadSideUnscavenged(TFChunk chunk) : base(chunk) {
    if (chunk.ChunkHeader.IsScavenged)
        throw new ArgumentException("Scavenged TFChunk passed into unscavenged chunk read side.");
}
public TFChunkReadSideUnscavenged(TFChunk chunk) : base(chunk) {
    if (chunk.IsReadOnly && chunk.ChunkFooter.MapCount > 0)
        throw new ArgumentException("Scavenged TFChunk passed into unscavenged chunk read side.");
}
protected TFChunkReadSide(TFChunk chunk) {
    Ensure.NotNull(chunk, "chunk");
    Chunk = chunk;
}
public TFChunkReadSideScavenged(TFChunk chunk) : base(chunk) {
    Ensure.Positive(chunk.ChunkFooter.MapCount, "chunk.ChunkFooter.MapCount");
}
public override void SetUp() {
    base.SetUp();
    _chunk = TFChunk.CreateNew(Filename, 1000, 0, 0, false);
    _chunk.MarkForDeletion();
}
private void ScavengeChunk(bool alwaysKeepScavenged, TFChunk.TFChunk oldChunk,
                           ThreadLocalScavengeCache threadLocalCache, CancellationToken ct) {
    if (oldChunk == null)
        throw new ArgumentNullException("oldChunk");

    var sw = Stopwatch.StartNew();

    int chunkStartNumber = oldChunk.ChunkHeader.ChunkStartNumber;
    long chunkStartPos = oldChunk.ChunkHeader.ChunkStartPosition;
    int chunkEndNumber = oldChunk.ChunkHeader.ChunkEndNumber;
    long chunkEndPos = oldChunk.ChunkHeader.ChunkEndPosition;

    var tmpChunkPath = Path.Combine(_db.Config.Path, Guid.NewGuid() + ".scavenge.tmp");
    var oldChunkName = oldChunk.ToString();
    Log.Trace("SCAVENGING: started to scavenge chunks: {oldChunkName} {chunkStartNumber} => {chunkEndNumber} ({chunkStartPosition} => {chunkEndPosition})",
              oldChunkName, chunkStartNumber, chunkEndNumber, chunkStartPos, chunkEndPos);
    Log.Trace("Resulting temp chunk file: {tmpChunkPath}.", Path.GetFileName(tmpChunkPath));

    TFChunk.TFChunk newChunk;
    try {
        newChunk = TFChunk.TFChunk.CreateNew(tmpChunkPath,
                                             _db.Config.ChunkSize,
                                             chunkStartNumber,
                                             chunkEndNumber,
                                             isScavenged: true,
                                             inMem: _db.Config.InMemDb,
                                             unbuffered: _db.Config.Unbuffered,
                                             writethrough: _db.Config.WriteThrough,
                                             initialReaderCount: _db.Config.InitialReaderCount,
                                             reduceFileCachePressure: _db.Config.ReduceFileCachePressure);
    } catch (IOException exc) {
        Log.ErrorException(exc, "IOException during creating new chunk for scavenging purposes. Stopping scavenging process...");
        throw;
    }

    try {
        TraverseChunkBasic(oldChunk, ct, result => {
            threadLocalCache.Records.Add(result);

            if (result.LogRecord.RecordType == LogRecordType.Commit) {
                var commit = (CommitLogRecord)result.LogRecord;
                if (commit.TransactionPosition >= chunkStartPos)
                    threadLocalCache.Commits.Add(commit.TransactionPosition, new CommitInfo(commit));
            }
        });

        long newSize = 0;
        int filteredCount = 0;

        for (int i = 0; i < threadLocalCache.Records.Count; i++) {
            ct.ThrowIfCancellationRequested();

            var recordReadResult = threadLocalCache.Records[i];
            if (ShouldKeep(recordReadResult, threadLocalCache.Commits, chunkStartPos, chunkEndPos)) {
                newSize += recordReadResult.RecordLength + 2 * sizeof(int);
                filteredCount++;
            } else {
                // We don't need this record any more.
                threadLocalCache.Records[i] = default(CandidateRecord);
            }
        }

        Log.Trace("Scavenging {oldChunkName} traversed {recordsCount} including {filteredCount}.",
                  oldChunkName, threadLocalCache.Records.Count, filteredCount);

        newSize += filteredCount * PosMap.FullSize + ChunkHeader.Size + ChunkFooter.Size;
        if (newChunk.ChunkHeader.Version >= (byte)TFChunk.TFChunk.ChunkVersions.Aligned)
            newSize = TFChunk.TFChunk.GetAlignedSize((int)newSize);

        bool oldVersion = oldChunk.ChunkHeader.Version != TFChunk.TFChunk.CurrentChunkVersion;
        long oldSize = oldChunk.FileSize;

        if (oldSize <= newSize && !alwaysKeepScavenged && !_unsafeIgnoreHardDeletes && !oldVersion) {
            Log.Trace("Scavenging of chunks:"
                      + "\n{oldChunkName}"
                      + "\ncompleted in {elapsed}."
                      + "\nOld chunks' versions are kept as they are smaller."
                      + "\nOld chunk total size: {oldSize}, scavenged chunk size: {newSize}."
                      + "\nScavenged chunk removed.", oldChunkName, sw.Elapsed, oldSize, newSize);

            newChunk.MarkForDeletion();
            _scavengerLog.ChunksNotScavenged(chunkStartNumber, chunkEndNumber, sw.Elapsed, "");
        } else {
            var positionMapping = new List<PosMap>(filteredCount);
            for (int i = 0; i < threadLocalCache.Records.Count; i++) {
                ct.ThrowIfCancellationRequested();

                // Since we replaced the ones we don't want with `default`, the success flag will only be true on the ones we want to keep.
                var recordReadResult = threadLocalCache.Records[i];

                // Check log record, if not present then assume we can skip.
                if (recordReadResult.LogRecord != null)
                    positionMapping.Add(WriteRecord(newChunk, recordReadResult.LogRecord));
            }

            newChunk.CompleteScavenge(positionMapping);

            if (_unsafeIgnoreHardDeletes)
                Log.Trace("Forcing scavenge chunk to be kept even if bigger.");

            if (oldVersion)
                Log.Trace("Forcing scavenged chunk to be kept as old chunk is a previous version.");

            var chunk = _db.Manager.SwitchChunk(newChunk, verifyHash: false, removeChunksWithGreaterNumbers: false);
            if (chunk != null) {
                Log.Trace("Scavenging of chunks:"
                          + "\n{oldChunkName}"
                          + "\ncompleted in {elapsed}."
                          + "\nNew chunk: {tmpChunkPath} --> #{chunkStartNumber}-{chunkEndNumber} ({newChunk})."
                          + "\nOld chunk total size: {oldSize}, scavenged chunk size: {newSize}.",
                          oldChunkName, sw.Elapsed, Path.GetFileName(tmpChunkPath), chunkStartNumber, chunkEndNumber,
                          Path.GetFileName(chunk.FileName), oldSize, newSize);

                var spaceSaved = oldSize - newSize;
                _scavengerLog.ChunksScavenged(chunkStartNumber, chunkEndNumber, sw.Elapsed, spaceSaved);
            } else {
                Log.Trace("Scavenging of chunks:"
                          + "\n{oldChunkName}"
                          + "\ncompleted in {elapsed}."
                          + "\nBut switching was prevented for new chunk: #{chunkStartNumber}-{chunkEndNumber} ({tmpChunkPath})."
                          + "\nOld chunks total size: {oldSize}, scavenged chunk size: {newSize}.",
                          oldChunkName, sw.Elapsed, chunkStartNumber, chunkEndNumber, Path.GetFileName(tmpChunkPath),
                          oldSize, newSize);

                _scavengerLog.ChunksNotScavenged(chunkStartNumber, chunkEndNumber, sw.Elapsed, "Chunk switch prevented.");
            }
        }
    } catch (FileBeingDeletedException exc) {
        Log.Info("Got FileBeingDeletedException exception during scavenging, that probably means some chunks were re-replicated."
                 + "\nScavenging of following chunks will be skipped: {oldChunkName}"
                 + "\nStopping scavenging and removing temp chunk '{tmpChunkPath}'..."
                 + "\nException message: {e}.",
                 oldChunkName, tmpChunkPath, exc.Message);

        newChunk.Dispose();
        DeleteTempChunk(tmpChunkPath, MaxRetryCount);
        _scavengerLog.ChunksNotScavenged(chunkStartNumber, chunkEndNumber, sw.Elapsed, exc.Message);
    } catch (OperationCanceledException) {
        Log.Info("Scavenging cancelled at: {oldChunkName}", oldChunkName);
        newChunk.MarkForDeletion();
        _scavengerLog.ChunksNotScavenged(chunkStartNumber, chunkEndNumber, sw.Elapsed, "Scavenge cancelled");
    } catch (Exception ex) {
        Log.Info("Got exception while scavenging chunk: #{chunkStartNumber}-{chunkEndNumber}. This chunk will be skipped\n"
                 + "Exception: {e}.", chunkStartNumber, chunkEndNumber, ex.ToString());

        newChunk.Dispose();
        DeleteTempChunk(tmpChunkPath, MaxRetryCount);
        _scavengerLog.ChunksNotScavenged(chunkStartNumber, chunkEndNumber, sw.Elapsed, ex.Message);
    }
}
public bool IsOptimized(TFChunk.TFChunk chunk) {
    TFChunk.TFChunk value;
    return _cache.TryGet(chunk.FileName, out value);
}
public void CompleteReplicatedRawChunk(TFChunk.TFChunk rawChunk) {
    _writerChunk = null; // in case creation of new chunk fails, we shouldn't use the completed chunk for writes

    rawChunk.CompleteRaw();
    _db.Manager.SwitchChunk(rawChunk, verifyHash: true, removeChunksWithGreaterNumbers: true);

    _writerPos = (rawChunk.ChunkHeader.ChunkEndNumber + 1) * (long)_db.Config.ChunkSize;
    _writerCheckpoint.Write(_writerPos);
    _writerCheckpoint.Flush();

    _writerChunk = _db.Manager.AddNewChunk();
}
public TFChunkReadSideScavenged(TFChunk chunk, bool optimizeCache) : base(chunk) {
    _optimizeCache = optimizeCache;
    if (!chunk.ChunkHeader.IsScavenged)
        throw new ArgumentException(string.Format("Chunk provided is not scavenged: {0}", chunk));
}
public void CompleteChunk() {
    var chunk = _writerChunk;
    _writerChunk = null; // in case creation of new chunk fails, we shouldn't use the completed chunk for writes

    chunk.Complete();

    _writerPos = (chunk.ChunkHeader.ChunkEndNumber + 1) * (long)_db.Config.ChunkSize;
    _writerCheckpoint.Write(_writerPos);
    _writerCheckpoint.Flush();

    _writerChunk = _db.Manager.AddNewChunk();
}
public override void SetUp() {
    base.SetUp();
    _chunk = TFChunk.CreateNew(Filename, 1024, 0, 0, false);
}