/// <inheritdoc />
public void GlobalBeforeEnteringState<Key, Value>(
    SystemState next,
    FasterKV<Key, Value> faster)
    where Key : new()
    where Value : new()
{
    switch (next.phase)
    {
        case Phase.PREP_INDEX_CHECKPOINT:
            // A full checkpoint must not start while another index or hybrid-log
            // checkpoint is in flight.
            Debug.Assert(faster._indexCheckpoint.IsDefault() &&
                         faster._hybridLogCheckpoint.IsDefault());

            // The index and hybrid-log portions of a full checkpoint share a
            // single GUID so they can be recovered together.
            var sharedToken = Guid.NewGuid();
            faster._indexCheckpointToken = sharedToken;
            faster._hybridLogCheckpointToken = sharedToken;

            faster.InitializeIndexCheckpoint(faster._indexCheckpointToken);
            faster.InitializeHybridLogCheckpoint(faster._hybridLogCheckpointToken, next.version);
            break;

        case Phase.WAIT_FLUSH:
            // Record overflow-bucket count and the log tail that bounds this
            // index checkpoint.
            faster._indexCheckpoint.info.num_buckets =
                faster.overflowBucketsAllocator.GetMaxValidAddress();
            faster.ObtainCurrentTailAddress(ref faster._indexCheckpoint.info.finalLogicalAddress);
            break;

        case Phase.PERSISTENCE_CALLBACK:
            // Persist index metadata, then clear the in-memory checkpoint state.
            faster.WriteIndexMetaInfo();
            faster._indexCheckpoint.Reset();
            break;
    }
}
/// <inheritdoc />
public override void GlobalBeforeEnteringState<Key, Value>(SystemState next, FasterKV<Key, Value> faster)
{
    switch (next.phase)
    {
        case Phase.PREPARE:
            // Release any artifacts left over from the previous snapshot before
            // this one begins; an incremental checkpoint can no longer build on it.
            faster._lastSnapshotCheckpoint.deltaFileDevice?.Dispose();
            faster._lastSnapshotCheckpoint.deltaLog?.Dispose();
            faster._lastSnapshotCheckpoint = default;
            base.GlobalBeforeEnteringState(next, faster);
            // Snapshot covers the log from the currently flushed-until address.
            faster._hybridLogCheckpoint.info.startLogicalAddress = faster.hlog.FlushedUntilAddress;
            faster._hybridLogCheckpoint.info.useSnapshotFile = 1;
            break;

        case Phase.WAIT_FLUSH:
            base.GlobalBeforeEnteringState(next, faster);
            // Capture the tail address: everything up to it is written to the snapshot.
            faster.ObtainCurrentTailAddress(ref faster._hybridLogCheckpoint.info.finalLogicalAddress);
            faster._hybridLogCheckpoint.info.snapshotFinalLogicalAddress =
                faster._hybridLogCheckpoint.info.finalLogicalAddress;

            // Acquire and initialize the separate snapshot devices (main log and
            // object log) for this checkpoint token.
            faster._hybridLogCheckpoint.snapshotFileDevice =
                faster.checkpointManager.GetSnapshotLogDevice(faster._hybridLogCheckpointToken);
            faster._hybridLogCheckpoint.snapshotFileObjectLogDevice =
                faster.checkpointManager.GetSnapshotObjectLogDevice(faster._hybridLogCheckpointToken);
            faster._hybridLogCheckpoint.snapshotFileDevice.Initialize(faster.hlog.GetSegmentSize());
            faster._hybridLogCheckpoint.snapshotFileObjectLogDevice.Initialize(-1);

            // Compute the page range [startPage, endPage) to flush; include the
            // partial last page if the final address extends into it.
            long startPage = faster.hlog.GetPage(faster._hybridLogCheckpoint.info.startLogicalAddress);
            long endPage = faster.hlog.GetPage(faster._hybridLogCheckpoint.info.finalLogicalAddress);
            if (faster._hybridLogCheckpoint.info.finalLogicalAddress >
                faster.hlog.GetStartLogicalAddress(endPage))
            {
                endPage++;
            }

            // We are writing pages outside epoch protection, so callee should be able to
            // handle corrupted or unexpected concurrent page changes during the flush, e.g., by
            // resuming epoch protection if necessary. Correctness is not affected as we will
            // only read safe pages during recovery.
            faster.hlog.AsyncFlushPagesToDevice(
                startPage,
                endPage,
                faster._hybridLogCheckpoint.info.finalLogicalAddress,
                faster._hybridLogCheckpoint.snapshotFileDevice,
                faster._hybridLogCheckpoint.snapshotFileObjectLogDevice,
                out faster._hybridLogCheckpoint.flushedSemaphore);
            break;

        case Phase.PERSISTENCE_CALLBACK:
            // update flushed-until address to the latest
            faster._hybridLogCheckpoint.info.flushedLogicalAddress = faster.hlog.FlushedUntilAddress;
            base.GlobalBeforeEnteringState(next, faster);
            // Remember this snapshot so a later incremental checkpoint can delta
            // against it.
            faster._lastSnapshotCheckpoint = faster._hybridLogCheckpoint;
            break;

        default:
            base.GlobalBeforeEnteringState(next, faster);
            break;
    }
}
/// <inheritdoc />
public void GlobalBeforeEnteringState<Key, Value, Input, Output, Context, Functions>(
    SystemState next,
    FasterKV<Key, Value, Input, Output, Context, Functions> faster)
    where Key : new()
    where Value : new()
    where Functions : IFunctions<Key, Value, Input, Output, Context>
{
    switch (next.phase)
    {
        case Phase.PREP_INDEX_CHECKPOINT:
            // Initialize a fresh index checkpoint unless a token already exists
            // (e.g., a full checkpoint created one earlier in the state machine).
            if (faster._indexCheckpointToken == default)
            {
                faster._indexCheckpointToken = Guid.NewGuid();
                faster.InitializeIndexCheckpoint(faster._indexCheckpointToken);
            }
            // Record the log tail at the start of the index checkpoint.
            faster.ObtainCurrentTailAddress(ref faster._indexCheckpoint.info.startLogicalAddress);
            break;

        case Phase.INDEX_CHECKPOINT:
            // Index checkpointing cannot proceed while the read cache holds
            // records (its addresses would corrupt the persisted index).
            if (faster.UseReadCache && faster.ReadCache.BeginAddress != faster.ReadCache.TailAddress)
            {
                throw new FasterException("Index checkpoint with read cache is not supported");
            }
            faster.TakeIndexFuzzyCheckpoint();
            break;

        case Phase.REST:
            // If the tail address has already been obtained, because another task on the state machine
            // has done so earlier (e.g. FullCheckpoint captures log tail at WAIT_FLUSH), don't update
            // the tail address.
            if (faster.ObtainCurrentTailAddress(ref faster._indexCheckpoint.info.finalLogicalAddress))
            {
                faster._indexCheckpoint.info.num_buckets = faster.overflowBucketsAllocator.GetMaxValidAddress();
            }
            // Persist metadata only if this task owns an active token, then clear
            // checkpoint state either way.
            if (faster._indexCheckpointToken != default)
            {
                faster.WriteIndexMetaInfo();
                faster._indexCheckpointToken = default;
            }
            faster._indexCheckpoint.Reset();
            break;
    }
}
/// <inheritdoc />
public virtual void GlobalBeforeEnteringState<Key, Value, Input, Output, Context, Functions>(
    SystemState next,
    FasterKV<Key, Value, Input, Output, Context, Functions> faster)
    where Key : new()
    where Value : new()
    where Functions : IFunctions<Key, Value, Input, Output, Context>
{
    switch (next.phase)
    {
        case Phase.PREPARE:
            // Lazily create the hybrid-log checkpoint unless a token was already
            // assigned (e.g., by a full-checkpoint task earlier in the machine).
            if (faster._hybridLogCheckpointToken == default)
            {
                faster._hybridLogCheckpointToken = Guid.NewGuid();
                faster.InitializeHybridLogCheckpoint(faster._hybridLogCheckpointToken, next.version);
            }
            // Log tail at checkpoint start.
            faster.ObtainCurrentTailAddress(ref faster._hybridLogCheckpoint.info.startLogicalAddress);
            break;

        case Phase.WAIT_FLUSH:
            // Capture current log boundaries for the checkpoint metadata.
            faster._hybridLogCheckpoint.info.headAddress = faster.hlog.HeadAddress;
            faster._hybridLogCheckpoint.info.beginAddress = faster.hlog.BeginAddress;
            break;

        case Phase.PERSISTENCE_CALLBACK:
            // Collect object log offsets only after flushes
            // are completed
            var segmentOffsets = faster.hlog.GetSegmentOffsets();
            if (segmentOffsets != null)
            {
                faster._hybridLogCheckpoint.info.objectLogSegmentOffsets = new long[segmentOffsets.Length];
                Array.Copy(
                    segmentOffsets,
                    faster._hybridLogCheckpoint.info.objectLogSegmentOffsets,
                    segmentOffsets.Length);
            }

            if (faster._activeSessions != null)
            {
                // write dormant sessions to checkpoint
                foreach (var session in faster._activeSessions)
                {
                    faster.AtomicSwitch(session.Value.ctx, session.Value.ctx.prevCtx, next.version - 1);
                }
            }

            faster.WriteHybridLogMetaInfo();
            break;

        case Phase.REST:
            // Clear checkpoint state and publish completion; install a fresh TCS
            // so the next checkpoint can be awaited.
            faster._hybridLogCheckpointToken = default;
            faster._hybridLogCheckpoint.Reset();
            var continuationTcs = new TaskCompletionSource<LinkedCheckpointInfo>(
                TaskCreationOptions.RunContinuationsAsynchronously);
            faster.checkpointTcs.SetResult(new LinkedCheckpointInfo { NextTask = continuationTcs.Task });
            faster.checkpointTcs = continuationTcs;
            break;
    }
}
/// <inheritdoc />
public virtual void GlobalBeforeEnteringState<Key, Value>(SystemState next, FasterKV<Key, Value> faster)
    where Key : new()
    where Value : new()
{
    switch (next.phase)
    {
        case Phase.PREPARE:
            // Create the hybrid-log checkpoint lazily if none is in progress.
            if (faster._hybridLogCheckpoint.IsDefault())
            {
                faster._hybridLogCheckpointToken = Guid.NewGuid();
                faster.InitializeHybridLogCheckpoint(faster._hybridLogCheckpointToken, next.version);
            }
            // Log tail at checkpoint start.
            faster.ObtainCurrentTailAddress(ref faster._hybridLogCheckpoint.info.startLogicalAddress);
            break;

        case Phase.WAIT_FLUSH:
            // Capture current log boundaries for the checkpoint metadata.
            faster._hybridLogCheckpoint.info.headAddress = faster.hlog.HeadAddress;
            faster._hybridLogCheckpoint.info.beginAddress = faster.hlog.BeginAddress;
            break;

        case Phase.PERSISTENCE_CALLBACK:
            // Collect object log offsets only after flushes
            // are completed
            var segmentOffsets = faster.hlog.GetSegmentOffsets();
            if (segmentOffsets != null)
            {
                faster._hybridLogCheckpoint.info.objectLogSegmentOffsets = new long[segmentOffsets.Length];
                Array.Copy(
                    segmentOffsets,
                    faster._hybridLogCheckpoint.info.objectLogSegmentOffsets,
                    segmentOffsets.Length);
            }

            // Temporarily block new sessions from starting, which may add an entry to the table and resize the
            // dictionary. There should be minimal contention here.
            lock (faster._activeSessions)
            {
                // write dormant sessions to checkpoint
                foreach (var session in faster._activeSessions)
                {
                    session.Value.AtomicSwitch(next.version - 1);
                }
            }

            faster.WriteHybridLogMetaInfo();
            break;

        case Phase.REST:
            // Clear checkpoint state and publish completion; install a fresh TCS
            // so the next checkpoint can be awaited.
            faster._hybridLogCheckpoint.Reset();
            var continuationTcs = new TaskCompletionSource<LinkedCheckpointInfo>(
                TaskCreationOptions.RunContinuationsAsynchronously);
            faster.checkpointTcs.SetResult(new LinkedCheckpointInfo { NextTask = continuationTcs.Task });
            faster.checkpointTcs = continuationTcs;
            break;
    }
}
/// <inheritdoc />
public void GlobalBeforeEnteringState<Key, Value>(SystemState next, FasterKV<Key, Value> faster)
{
    switch (next.phase)
    {
        case Phase.PREP_INDEX_CHECKPOINT:
            // Initialize a fresh index checkpoint unless one is already in
            // progress (e.g., started by a full-checkpoint task).
            if (faster._indexCheckpoint.IsDefault())
            {
                faster._indexCheckpointToken = Guid.NewGuid();
                faster.InitializeIndexCheckpoint(faster._indexCheckpointToken);
            }
            // Record the log tail at the start, then begin the fuzzy index
            // checkpoint immediately.
            faster.ObtainCurrentTailAddress(ref faster._indexCheckpoint.info.startLogicalAddress);
            faster.TakeIndexFuzzyCheckpoint();
            break;

        case Phase.WAIT_INDEX_CHECKPOINT:
        case Phase.WAIT_INDEX_ONLY_CHECKPOINT:
            // Nothing to do globally; threads wait for the fuzzy checkpoint to drain.
            break;

        case Phase.REST:
            // If the tail address has already been obtained, because another task on the state machine
            // has done so earlier (e.g. FullCheckpoint captures log tail at WAIT_FLUSH), don't update
            // the tail address.
            if (faster.ObtainCurrentTailAddress(ref faster._indexCheckpoint.info.finalLogicalAddress))
            {
                faster._indexCheckpoint.info.num_buckets = faster.overflowBucketsAllocator.GetMaxValidAddress();
            }
            // Persist metadata only if a checkpoint is actually active, then reset.
            if (!faster._indexCheckpoint.IsDefault())
            {
                faster.WriteIndexMetaInfo();
                faster._indexCheckpoint.Reset();
            }
            break;
    }
}
/// <inheritdoc />
public override void GlobalBeforeEnteringState<Key, Value>(SystemState next, FasterKV<Key, Value> faster)
{
    switch (next.Phase)
    {
        case Phase.PREPARE:
            // An incremental checkpoint builds on top of the previous snapshot:
            // reuse its state as the starting point for this checkpoint.
            faster._hybridLogCheckpoint = faster._lastSnapshotCheckpoint;
            base.GlobalBeforeEnteringState(next, faster);
            // Delta covers the log from the currently flushed-until address.
            faster._hybridLogCheckpoint.info.startLogicalAddress = faster.hlog.FlushedUntilAddress;
            faster._hybridLogCheckpoint.prevVersion = next.Version;
            break;

        case Phase.WAIT_FLUSH:
            base.GlobalBeforeEnteringState(next, faster);
            // Reset before ObtainCurrentTailAddress so the tail is always
            // re-captured for this delta (the field may hold a stale value from
            // the reused snapshot state).
            faster._hybridLogCheckpoint.info.finalLogicalAddress = 0;
            faster.ObtainCurrentTailAddress(ref faster._hybridLogCheckpoint.info.finalLogicalAddress);

            // Lazily create the delta file device and delta log on the first
            // incremental checkpoint after the base snapshot.
            if (faster._hybridLogCheckpoint.deltaLog == null)
            {
                faster._hybridLogCheckpoint.deltaFileDevice =
                    faster.checkpointManager.GetDeltaLogDevice(faster._hybridLogCheckpointToken);
                faster._hybridLogCheckpoint.deltaFileDevice.Initialize(-1);
                faster._hybridLogCheckpoint.deltaLog = new DeltaLog(
                    faster._hybridLogCheckpoint.deltaFileDevice, faster.hlog.LogPageSizeBits, -1);
                faster._hybridLogCheckpoint.deltaLog.InitializeForWrites(faster.hlog.bufferPool);
            }

            // Flush only the changed records in [startLogicalAddress,
            // finalLogicalAddress) as a delta against the last snapshot.
            faster.hlog.AsyncFlushDeltaToDevice(
                faster._hybridLogCheckpoint.info.startLogicalAddress,
                faster._hybridLogCheckpoint.info.finalLogicalAddress,
                faster._lastSnapshotCheckpoint.info.finalLogicalAddress,
                faster._hybridLogCheckpoint.prevVersion,
                faster._hybridLogCheckpoint.deltaLog);
            break;

        case Phase.PERSISTENCE_CALLBACK:
            faster._hybridLogCheckpoint.info.flushedLogicalAddress = faster.hlog.FlushedUntilAddress;
            CollectMetadata(next, faster);
            // Append checkpoint metadata to the delta log, then record its tail.
            faster.WriteHybridLogIncrementalMetaInfo(faster._hybridLogCheckpoint.deltaLog);
            faster._hybridLogCheckpoint.info.deltaTailAddress = faster._hybridLogCheckpoint.deltaLog.TailAddress;
            // Transfer ownership of devices/logs to _lastSnapshotCheckpoint so
            // the next incremental checkpoint can continue from here, then
            // dispose what remains of the working copy.
            faster._lastSnapshotCheckpoint = faster._hybridLogCheckpoint.Transfer();
            faster._hybridLogCheckpoint.Dispose();
            break;
    }
}
/// <inheritdoc />
public override void GlobalBeforeEnteringState<Key, Value>(SystemState next, FasterKV<Key, Value> faster)
{
    // Base handling (token/checkpoint initialization etc.) runs first for every phase.
    base.GlobalBeforeEnteringState(next, faster);
    switch (next.phase)
    {
        case Phase.PREPARE:
            // Snapshot covers the log from the currently flushed-until address.
            faster._hybridLogCheckpoint.info.flushedLogicalAddress = faster.hlog.FlushedUntilAddress;
            faster._hybridLogCheckpoint.info.useSnapshotFile = 1;
            break;

        case Phase.WAIT_FLUSH:
            // Capture the tail: everything up to it is written to the snapshot files.
            faster.ObtainCurrentTailAddress(ref faster._hybridLogCheckpoint.info.finalLogicalAddress);

            // Acquire and initialize the separate snapshot devices (main log and
            // object log) for this checkpoint token.
            faster._hybridLogCheckpoint.snapshotFileDevice =
                faster.checkpointManager.GetSnapshotLogDevice(faster._hybridLogCheckpointToken);
            faster._hybridLogCheckpoint.snapshotFileObjectLogDevice =
                faster.checkpointManager.GetSnapshotObjectLogDevice(faster._hybridLogCheckpointToken);
            faster._hybridLogCheckpoint.snapshotFileDevice.Initialize(faster.hlog.GetSegmentSize());
            faster._hybridLogCheckpoint.snapshotFileObjectLogDevice.Initialize(-1);

            // Compute the page range to flush; include the partial last page if
            // the final address extends into it.
            long startPage = faster.hlog.GetPage(faster._hybridLogCheckpoint.info.flushedLogicalAddress);
            long endPage = faster.hlog.GetPage(faster._hybridLogCheckpoint.info.finalLogicalAddress);
            if (faster._hybridLogCheckpoint.info.finalLogicalAddress >
                faster.hlog.GetStartLogicalAddress(endPage))
            {
                endPage++;
            }

            // This can be run on a new thread if we want to immediately parallelize
            // the rest of the log flush
            faster.hlog.AsyncFlushPagesToDevice(
                startPage,
                endPage,
                faster._hybridLogCheckpoint.info.finalLogicalAddress,
                faster._hybridLogCheckpoint.snapshotFileDevice,
                faster._hybridLogCheckpoint.snapshotFileObjectLogDevice,
                out faster._hybridLogCheckpoint.flushedSemaphore);
            break;
    }
}
/// <inheritdoc />
public void GlobalBeforeEnteringState<Key, Value, Input, Output, Context, Functions>(
    SystemState next,
    FasterKV<Key, Value, Input, Output, Context, Functions> faster)
    where Key : new()
    where Value : new()
    where Functions : IFunctions<Key, Value, Input, Output, Context>
{
    switch (next.phase)
    {
        case Phase.PREP_INDEX_CHECKPOINT:
            // A full checkpoint must not start while another index or hybrid-log
            // checkpoint is in flight.
            Debug.Assert(faster._indexCheckpointToken == default &&
                         faster._hybridLogCheckpointToken == default);

            // The index and hybrid-log portions of a full checkpoint share a
            // single GUID so they can be recovered together.
            var checkpointToken = Guid.NewGuid();
            faster._indexCheckpointToken = checkpointToken;
            faster._hybridLogCheckpointToken = checkpointToken;

            faster.InitializeIndexCheckpoint(faster._indexCheckpointToken);
            faster.InitializeHybridLogCheckpoint(faster._hybridLogCheckpointToken, next.version);
            break;

        case Phase.PREPARE:
            // Index checkpointing cannot proceed while the read cache holds
            // records (its addresses would corrupt the persisted index).
            if (faster.UseReadCache && faster.ReadCache.BeginAddress != faster.ReadCache.TailAddress)
            {
                throw new FasterException("Index checkpoint with read cache is not supported");
            }
            faster.TakeIndexFuzzyCheckpoint();
            break;

        case Phase.WAIT_FLUSH:
            // Record overflow-bucket count and the log tail that bounds the
            // index portion of this checkpoint.
            faster._indexCheckpoint.info.num_buckets =
                faster.overflowBucketsAllocator.GetMaxValidAddress();
            faster.ObtainCurrentTailAddress(ref faster._indexCheckpoint.info.finalLogicalAddress);
            break;

        case Phase.PERSISTENCE_CALLBACK:
            // Persist index metadata and release the token.
            faster.WriteIndexMetaInfo();
            faster._indexCheckpointToken = default;
            break;
    }
}
/// <inheritdoc />
public virtual void GlobalBeforeEnteringState<Key, Value>(SystemState next, FasterKV<Key, Value> faster)
{
    switch (next.Phase)
    {
        case Phase.PREPARE:
            // Remember the version we are checkpointing from; published to
            // faster.lastVersion once the checkpoint persists.
            lastVersion = faster.systemState.Version;

            // Create the hybrid-log checkpoint lazily if none is in progress.
            if (faster._hybridLogCheckpoint.IsDefault())
            {
                faster._hybridLogCheckpointToken = Guid.NewGuid();
                faster.InitializeHybridLogCheckpoint(faster._hybridLogCheckpointToken, next.Version);
            }
            faster._hybridLogCheckpoint.info.version = next.Version;
            // Log tail at checkpoint start.
            faster.ObtainCurrentTailAddress(ref faster._hybridLogCheckpoint.info.startLogicalAddress);
            break;

        case Phase.WAIT_FLUSH:
            // Capture current log boundaries and the version being entered.
            faster._hybridLogCheckpoint.info.headAddress = faster.hlog.HeadAddress;
            faster._hybridLogCheckpoint.info.beginAddress = faster.hlog.BeginAddress;
            faster._hybridLogCheckpoint.info.nextVersion = next.Version;
            break;

        case Phase.PERSISTENCE_CALLBACK:
            CollectMetadata(next, faster);
            faster.WriteHybridLogMetaInfo();
            faster.lastVersion = lastVersion;
            break;

        case Phase.REST:
            // Release checkpoint resources and publish completion; install a
            // fresh TCS so the next checkpoint can be awaited.
            faster._hybridLogCheckpoint.Dispose();
            var continuationTcs = new TaskCompletionSource<LinkedCheckpointInfo>(
                TaskCreationOptions.RunContinuationsAsynchronously);
            faster.checkpointTcs.SetResult(new LinkedCheckpointInfo { NextTask = continuationTcs.Task });
            faster.checkpointTcs = continuationTcs;
            break;
    }
}