/// <inheritdoc />
public void GlobalBeforeEnteringState<Key, Value>(SystemState next, FasterKV<Key, Value> faster)
    where Key : new()
    where Value : new()
{
    if (next.phase == Phase.PREP_INDEX_CHECKPOINT)
    {
        // A full checkpoint shares a single token between the index and
        // hybrid-log checkpoints; neither may already be in progress.
        Debug.Assert(faster._indexCheckpoint.IsDefault() && faster._hybridLogCheckpoint.IsDefault());
        var sharedToken = Guid.NewGuid();
        faster._indexCheckpointToken = sharedToken;
        faster._hybridLogCheckpointToken = sharedToken;
        faster.InitializeIndexCheckpoint(faster._indexCheckpointToken);
        faster.InitializeHybridLogCheckpoint(faster._hybridLogCheckpointToken, next.version);
    }
    else if (next.phase == Phase.WAIT_FLUSH)
    {
        // Record the current index size and the log tail so the index
        // metadata written later reflects this checkpoint's cut-off point.
        faster._indexCheckpoint.info.num_buckets = faster.overflowBucketsAllocator.GetMaxValidAddress();
        faster.ObtainCurrentTailAddress(ref faster._indexCheckpoint.info.finalLogicalAddress);
    }
    else if (next.phase == Phase.PERSISTENCE_CALLBACK)
    {
        // Persist the index metadata, then clear index-checkpoint state
        // so a subsequent checkpoint starts from a clean slate.
        faster.WriteIndexMetaInfo();
        faster._indexCheckpoint.Reset();
    }
}
/// <inheritdoc />
public void GlobalBeforeEnteringState<Key, Value, Input, Output, Context, Functions>(
    SystemState next,
    FasterKV<Key, Value, Input, Output, Context, Functions> faster)
    where Key : new()
    where Value : new()
    where Functions : IFunctions<Key, Value, Input, Output, Context>
{
    if (next.phase == Phase.PREP_INDEX_CHECKPOINT)
    {
        // Lazily create the checkpoint token and metadata — another task
        // (e.g. a full checkpoint) may already have initialized them.
        if (faster._indexCheckpointToken == default)
        {
            faster._indexCheckpointToken = Guid.NewGuid();
            faster.InitializeIndexCheckpoint(faster._indexCheckpointToken);
        }
        faster.ObtainCurrentTailAddress(ref faster._indexCheckpoint.info.startLogicalAddress);
    }
    else if (next.phase == Phase.INDEX_CHECKPOINT)
    {
        // A non-empty read cache would make the fuzzy index snapshot
        // inconsistent with the main log, so reject it up front.
        if (faster.UseReadCache && faster.ReadCache.BeginAddress != faster.ReadCache.TailAddress)
        {
            throw new FasterException("Index checkpoint with read cache is not supported");
        }
        faster.TakeIndexFuzzyCheckpoint();
    }
    else if (next.phase == Phase.REST)
    {
        // If the tail address has already been obtained, because another task on the state machine
        // has done so earlier (e.g. FullCheckpoint captures log tail at WAIT_FLUSH), don't update
        // the tail address.
        if (faster.ObtainCurrentTailAddress(ref faster._indexCheckpoint.info.finalLogicalAddress))
        {
            faster._indexCheckpoint.info.num_buckets = faster.overflowBucketsAllocator.GetMaxValidAddress();
        }
        // Only this task's own token triggers a metadata write; a token
        // already cleared means another task persisted it.
        if (faster._indexCheckpointToken != default)
        {
            faster.WriteIndexMetaInfo();
            faster._indexCheckpointToken = default;
        }
        faster._indexCheckpoint.Reset();
    }
}
/// <inheritdoc />
public void GlobalBeforeEnteringState<Key, Value, Input, Output, Context, Functions>(
    SystemState next,
    FasterKV<Key, Value, Input, Output, Context, Functions> faster)
    where Key : new()
    where Value : new()
    where Functions : IFunctions<Key, Value, Input, Output, Context>
{
    if (next.phase == Phase.PREP_INDEX_CHECKPOINT)
    {
        // A full checkpoint uses one shared token for both the index and
        // the hybrid log; no other checkpoint may be in flight.
        Debug.Assert(faster._indexCheckpointToken == default && faster._hybridLogCheckpointToken == default);
        var sharedToken = Guid.NewGuid();
        faster._indexCheckpointToken = sharedToken;
        faster._hybridLogCheckpointToken = sharedToken;
        faster.InitializeIndexCheckpoint(faster._indexCheckpointToken);
        faster.InitializeHybridLogCheckpoint(faster._hybridLogCheckpointToken, next.version);
    }
    else if (next.phase == Phase.PREPARE)
    {
        // A non-empty read cache would make the fuzzy index snapshot
        // inconsistent with the main log, so reject it up front.
        if (faster.UseReadCache && faster.ReadCache.BeginAddress != faster.ReadCache.TailAddress)
        {
            throw new FasterException("Index checkpoint with read cache is not supported");
        }
        faster.TakeIndexFuzzyCheckpoint();
    }
    else if (next.phase == Phase.WAIT_FLUSH)
    {
        // Record the current index size and the log tail so the index
        // metadata written later reflects this checkpoint's cut-off point.
        faster._indexCheckpoint.info.num_buckets = faster.overflowBucketsAllocator.GetMaxValidAddress();
        faster.ObtainCurrentTailAddress(ref faster._indexCheckpoint.info.finalLogicalAddress);
    }
    else if (next.phase == Phase.PERSISTENCE_CALLBACK)
    {
        // Persist the index metadata and release the token so a later
        // checkpoint can begin.
        faster.WriteIndexMetaInfo();
        faster._indexCheckpointToken = default;
    }
}
/// <inheritdoc />
public void GlobalBeforeEnteringState<Key, Value>(SystemState next, FasterKV<Key, Value> faster)
{
    if (next.phase == Phase.PREP_INDEX_CHECKPOINT)
    {
        // Lazily create the checkpoint token and metadata — another task
        // (e.g. a full checkpoint) may already have initialized them.
        if (faster._indexCheckpoint.IsDefault())
        {
            faster._indexCheckpointToken = Guid.NewGuid();
            faster.InitializeIndexCheckpoint(faster._indexCheckpointToken);
        }
        faster.ObtainCurrentTailAddress(ref faster._indexCheckpoint.info.startLogicalAddress);
        faster.TakeIndexFuzzyCheckpoint();
    }
    else if (next.phase == Phase.WAIT_INDEX_CHECKPOINT || next.phase == Phase.WAIT_INDEX_ONLY_CHECKPOINT)
    {
        // Nothing to do globally while waiting for the index checkpoint
        // to complete; progress happens on the per-thread path.
    }
    else if (next.phase == Phase.REST)
    {
        // If the tail address has already been obtained, because another task on the state machine
        // has done so earlier (e.g. FullCheckpoint captures log tail at WAIT_FLUSH), don't update
        // the tail address.
        if (faster.ObtainCurrentTailAddress(ref faster._indexCheckpoint.info.finalLogicalAddress))
        {
            faster._indexCheckpoint.info.num_buckets = faster.overflowBucketsAllocator.GetMaxValidAddress();
        }
        // Only write metadata if this task still owns live checkpoint
        // state; reset afterwards so the next checkpoint starts clean.
        if (!faster._indexCheckpoint.IsDefault())
        {
            faster.WriteIndexMetaInfo();
            faster._indexCheckpoint.Reset();
        }
    }
}