internal CompleteCheckpointLogRecord(
    LogicalSequenceNumber lsn,
    IndexingLogRecord logHeadRecord,
    PhysicalLogRecord lastLinkedPhysicalRecord)
    : base(LogRecordType.CompleteCheckpoint, logHeadRecord, lsn, lastLinkedPhysicalRecord)
{
}
public bool IsGoodLogHeadCandidiate(IndexingLogRecord proposedLogHead)
{
    // Configured truncation has been initiated, so override the minTruncationAmount and minLogSize requirements.
    if (periodicCheckpointTruncationState == PeriodicCheckpointTruncationState.TruncationStarted)
    {
        return true;
    }

    // This is a very recent indexing record and not a good candidate.
    // We can say that because it has not yet been flushed to the disk.
    if (proposedLogHead.RecordPosition == LogRecord.InvalidRecordPosition)
    {
        return false;
    }

    // Is it worth truncating?
    // We do not want to truncate just a couple of bytes since we would have to repeat the process soon.
    if (proposedLogHead.RecordPosition - this.logManager.CurrentLogHeadRecord.RecordPosition < this.minTruncationAmountInBytes)
    {
        return false;
    }

    // Would it truncate too much?
    // We do not want to truncate if it would cause the log to shrink below minLogSizeInBytes.
    // This is because small logs can cause unnecessary full copies and full backups.
    ulong resultingLogSize = this.GetCurrentTailPosition() - proposedLogHead.RecordPosition;
    if (resultingLogSize < this.minLogSizeInBytes)
    {
        return false;
    }

    return true;
}
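// The two size checks above reduce to simple byte arithmetic. The following is a minimal,
// standalone sketch of that arithmetic using hypothetical positions and limits; it models
// the decision only and is not part of the replicator API.
using System;

internal static class LogHeadCandidateSketch
{
    // Returns true when truncating at proposedHeadPosition both reclaims at least
    // minTruncationAmountInBytes and leaves the log at or above minLogSizeInBytes.
    internal static bool IsWorthTruncating(
        ulong currentHeadPosition,
        ulong proposedHeadPosition,
        ulong currentTailPosition,
        ulong minTruncationAmountInBytes,
        ulong minLogSizeInBytes)
    {
        ulong reclaimedBytes = proposedHeadPosition - currentHeadPosition;
        if (reclaimedBytes < minTruncationAmountInBytes)
        {
            return false; // Too little reclaimed; we would have to truncate again soon.
        }

        ulong resultingLogSize = currentTailPosition - proposedHeadPosition;
        return resultingLogSize >= minLogSizeInBytes; // Avoid shrinking the log below the minimum.
    }

    private static void Main()
    {
        // Hypothetical example: head at 0, proposed head at 96 MB, tail at 160 MB,
        // 32 MB minimum truncation, 64 MB minimum log size => truncation is worthwhile.
        Console.WriteLine(IsWorthTruncating(0, 96UL << 20, 160UL << 20, 32UL << 20, 64UL << 20));
    }
}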
/// <summary>
/// Creates or finds the log stream.
/// If being created, either initializes the log with default log records or with records from the backup log.
/// </summary>
/// <param name="openMode">Open mode of the replica.</param>
/// <returns>Task that represents the asynchronous open operation.</returns>
internal async Task<PhysicalLogReader> OpenAsync(ReplicaOpenMode openMode)
{
    // TODO: Anurag: do we plumb c.token up?
    this.LogicalLog = await this.CreateLogFileAsync(openMode == ReplicaOpenMode.New, CancellationToken.None).ConfigureAwait(false);

    var logLogLength = this.LogicalLog.Length;
    if (logLogLength <= sizeof(int))
    {
        // No usable content in the log
        if (this.LogicalLog.WritePosition > 0)
        {
            await this.LogicalLog.TruncateTail(0, CancellationToken.None).ConfigureAwait(false); // Remove all contents and reset write cursor back to 0

            Utility.Assert(this.LogicalLog.Length == 0, "this.logicalLog.Length == 0");
            Utility.Assert(this.LogicalLog.WritePosition == 0, "this.logicalLog.WritePosition == 0");
        }

        using (
            var logWriter = new PhysicalLogWriter(
                this.LogicalLog,
                this.emptyCallbackManager,
                this.Tracer,
                this.MaxWriteCacheSizeInMB,
                this.IncomingBytesRateCounterWriter,
                this.LogFlushBytesRateCounterWriter,
                this.BytesPerFlushCounterWriter,
                this.AvgFlushLatencyCounterWriter,
                this.AvgSerializationLatencyCounterWriter,
                false))
        {
            var zeroIndexRecord = IndexingLogRecord.CreateZeroIndexingLogRecord();
            logWriter.InsertBufferedRecord(zeroIndexRecord);
            logWriter.InsertBufferedRecord(UpdateEpochLogRecord.CreateZeroUpdateEpochLogRecord());

            var zeroBeginCheckpointRecord = BeginCheckpointLogRecord.CreateZeroBeginCheckpointLogRecord();
            logWriter.InsertBufferedRecord(zeroBeginCheckpointRecord);
            logWriter.InsertBufferedRecord(BarrierLogRecord.CreateOneBarrierLogRecord());

            var oneEndCheckpointRecord = EndCheckpointLogRecord.CreateOneEndCheckpointLogRecord(
                zeroBeginCheckpointRecord,
                zeroIndexRecord);
            logWriter.InsertBufferedRecord(oneEndCheckpointRecord);

            var endCompleteCheckpointRecord = new CompleteCheckpointLogRecord(
                LogicalSequenceNumber.OneLsn,
                zeroIndexRecord,
                oneEndCheckpointRecord);
            logWriter.InsertBufferedRecord(endCompleteCheckpointRecord);

            await logWriter.FlushAsync("OpenAsync").ConfigureAwait(false);

            // This additional await is required to ensure the log record was indeed flushed.
            // Without this, FlushAsync could succeed, but the log record flush could have failed due to a write error.
            await endCompleteCheckpointRecord.AwaitFlush().ConfigureAwait(false);
        }
    }

    return new PhysicalLogReader(this);
}
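// OpenAsync above relies on a two-step flush: FlushAsync pushes the buffered records, and
// the per-record AwaitFlush confirms that this specific record actually reached disk.
// Below is a minimal sketch of that pattern against hypothetical interfaces; it only
// illustrates why both awaits are needed and is not the replicator's API surface.
using System.Threading.Tasks;

internal interface IBufferedRecord
{
    // Completes only when this specific record has been written to disk (or faults on a write error).
    Task AwaitFlush();
}

internal interface IBufferedLogWriter
{
    void InsertBufferedRecord(IBufferedRecord record);

    // Completes when the flush has been issued; an individual record's write may still fail.
    Task FlushAsync(string caller);
}

internal static class FlushConfirmationSketch
{
    internal static async Task AppendAndConfirmAsync(IBufferedLogWriter writer, IBufferedRecord record)
    {
        writer.InsertBufferedRecord(record);

        // Step 1: push the buffered records toward the log.
        await writer.FlushAsync(nameof(AppendAndConfirmAsync)).ConfigureAwait(false);

        // Step 2: confirm this record hit the disk; FlushAsync alone can succeed while
        // the record's own write later fails.
        await record.AwaitFlush().ConfigureAwait(false);
    }
}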
/// <summary>
/// Process the indexing log record.
/// </summary>
/// <param name="indexingLogRecord">The indexing log record to be processed.</param>
/// <param name="isRecoverableRecord">Is this a recoverable record.</param>
private void ProcessLogRecord(IndexingLogRecord indexingLogRecord, out bool isRecoverableRecord)
{
    isRecoverableRecord = false;

    Utility.Assert(
        indexingLogRecord.CurrentEpoch == this.CurrentLogTailEpoch,
        "indexingRecord.CurrentEpoch == this.currentLogTailEpoch");
}
public void Test_TruncateHead(
    TruncateHeadLogRecord record,
    IndexingLogRecord headRecord)
{
    this.LastLinkedPhysicalRecord = record;
    this.CurrentLogHeadRecord = headRecord;
    this.LastInProgressTruncateHeadRecord = record;
    this.LogManager.PhysicalLogWriter.InsertBufferedRecord(record);
}
internal LogHeadRecord(LogRecordType recordType, ulong recordPosition, long lsn)
    : base(recordType, recordPosition, lsn)
{
    this.logHeadEpoch = LogicalSequenceNumber.InvalidEpoch;
    this.logHeadLsn = LogicalSequenceNumber.InvalidLsn;
    this.logHeadPsn = PhysicalSequenceNumber.InvalidPsn;
    this.logHeadRecordOffset = InvalidPhysicalRecordOffset;
    this.logHeadRecord = IndexingLogRecord.InvalidIndexingLogRecord;
}
internal override async Task<IndexingLogRecord> CreateCopyLogAsync(
    Epoch startingEpoch,
    LogicalSequenceNumber startingLsn)
{
    var flushCallback = this.PhysicalLogWriter.CallbackManager.Callback;
    await this.CloseCurrentLogAsync().ConfigureAwait(false);

    this.CurrentLogFileAlias = this.BaseLogFileAlias + CopySuffix;

    try
    {
        var aliasGuid = await this.physicalLog.ResolveAliasAsync(this.CurrentLogFileAlias, CancellationToken.None).ConfigureAwait(false);

        FabricEvents.Events.LogManager(
            this.Tracer.Type,
            "CreateCopyLog: Attempt to delete logical log " + this.CurrentLogFileAlias + " guid: " + aliasGuid);

        await this.physicalLog.DeleteLogicalLogAsync(aliasGuid, CancellationToken.None).ConfigureAwait(false);
    }
    catch (Exception ex)
    {
        FabricEvents.Events.LogManager(
            this.Tracer.Type,
            "CreateCopyLog: Delete logical log: " + this.CurrentLogFileAlias + " failed: " + ex);
    }

    this.LogicalLog = await this.CreateLogFileAsync(true, CancellationToken.None).ConfigureAwait(false);

    var callbackManager = new PhysicalLogWriterCallbackManager(
        flushCallback,
        this.Tracer);

    this.PhysicalLogWriter = new PhysicalLogWriter(
        this.LogicalLog,
        callbackManager,
        this.Tracer,
        this.MaxWriteCacheSizeInMB,
        this.IncomingBytesRateCounterWriter,
        this.LogFlushBytesRateCounterWriter,
        this.BytesPerFlushCounterWriter,
        this.AvgFlushLatencyCounterWriter,
        this.AvgSerializationLatencyCounterWriter,
        false);

    var firstIndexingRecord = new IndexingLogRecord(startingEpoch, startingLsn, null);
    this.PhysicalLogWriter.InsertBufferedRecord(firstIndexingRecord);

    await this.PhysicalLogWriter.FlushAsync("CreateCopyLogAsync").ConfigureAwait(false);

    this.LogHeadRecordPosition = firstIndexingRecord.RecordPosition;
    return firstIndexingRecord;
}
/// <summary>
/// Checks whether the log meets the truncate head criteria and, if so, appends a truncate head record.
/// </summary>
public TruncateHeadLogRecord TruncateHead(
    bool isStable,
    long lastPeriodicTruncationTimeTicks,
    Func<IndexingLogRecord, bool> isGoodLogHeadCandidateCalculator)
{
    lock (lsnOrderingLock)
    {
        var earliestRecord = this.GetEarliestRecordCallerHoldsLock();

        IndexingLogRecord previousIndexingRecord = null;
        PhysicalLogRecord previousPhysicalRecord = this.LastCompletedBeginCheckpointRecord;
        TruncateHeadLogRecord record = null;

        do
        {
            // Search for the last indexing log record.
            do
            {
                previousPhysicalRecord = previousPhysicalRecord.PreviousPhysicalRecord;
            }
            while (!(previousPhysicalRecord is IndexingLogRecord));

            previousIndexingRecord = previousPhysicalRecord as IndexingLogRecord;

            // This index record is not before the earliest pending transaction record.
            if (previousIndexingRecord.RecordPosition >= earliestRecord.RecordPosition)
            {
                continue;
            }

            // We reached the log head, so do not continue to look for the last index log record.
            if (previousIndexingRecord == this.CurrentLogHeadRecord)
            {
                return null;
            }

            if (isGoodLogHeadCandidateCalculator(previousIndexingRecord))
            {
                break;
            }
        }
        while (true);

        record = new TruncateHeadLogRecord(
            previousIndexingRecord,
            this.CurrentLogTailLsn,
            this.LastLinkedPhysicalRecord,
            isStable,
            lastPeriodicTruncationTimeTicks);

        Test_TruncateHead(record, previousIndexingRecord);
        return record;
    }
}
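// The loop above walks backward from the last completed begin checkpoint, following
// PreviousPhysicalRecord links until it finds an indexing record that lies before the
// earliest pending record and is accepted by the candidate predicate, stopping with null
// at the current head. Below is a minimal standalone sketch of that scan over a
// hypothetical singly-linked record chain; names and types are illustrative only.
using System;

internal sealed class RecordNodeSketch
{
    public RecordNodeSketch Previous;
    public bool IsIndexing;
    public ulong RecordPosition;
}

internal static class HeadSearchSketch
{
    // Returns the new head candidate, or null if the scan reaches the current head first.
    internal static RecordNodeSketch FindNewHead(
        RecordNodeSketch lastCompletedBeginCheckpoint,
        RecordNodeSketch currentHead,
        ulong earliestPendingPosition,
        Func<RecordNodeSketch, bool> isGoodCandidate)
    {
        RecordNodeSketch current = lastCompletedBeginCheckpoint;

        while (true)
        {
            // Walk back to the previous indexing record.
            do
            {
                current = current.Previous;
            }
            while (!current.IsIndexing);

            if (current == currentHead)
            {
                return null; // Reached the existing head without finding a better candidate.
            }

            if (current.RecordPosition >= earliestPendingPosition)
            {
                continue; // Not yet before the earliest pending record; keep walking back.
            }

            if (isGoodCandidate(current))
            {
                return current;
            }
        }
    }
}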
internal TruncateHeadLogRecord(
    IndexingLogRecord logHeadRecord,
    LogicalSequenceNumber lsn,
    PhysicalLogRecord lastLinkedPhysicalRecord,
    bool isStable,
    long periodicTruncationTimeTicks)
    : base(LogRecordType.TruncateHead, logHeadRecord, lsn, lastLinkedPhysicalRecord)
{
    this.truncationState = TruncationState.Invalid;
    this.isStable = isStable;
    this.periodicTruncationTimeTicks = periodicTruncationTimeTicks;

    this.UpdateApproximateDiskSize();
}
public void Open(
    ITracer tracer,
    Func<bool, BeginCheckpointLogRecord> appendCheckpointCallback,
    IndexingLogRecord currentHead,
    RoleContextDrainState roleContextDrainState,
    IStateReplicator fabricReplicator)
{
    this.appendCheckpointCallback = appendCheckpointCallback;
    this.RoleContextDrainState = roleContextDrainState;
    this.fabricReplicator = fabricReplicator;
    this.tracer = tracer;
    this.CurrentLogHeadRecord = currentHead;

    this.replicationSerializationBinaryWritersPoolClearTimer.Change(Constants.ReplicationWriteMemoryStreamsBufferPoolCleanupMilliseconds, Timeout.Infinite);
}
internal LogHeadRecord(
    LogRecordType recordType,
    IndexingLogRecord logHeadRecord,
    LogicalSequenceNumber lsn,
    PhysicalLogRecord lastLinkedPhysicalRecord)
    : base(recordType, lsn, lastLinkedPhysicalRecord)
{
    this.logHeadEpoch = logHeadRecord.CurrentEpoch;
    this.logHeadLsn = logHeadRecord.Lsn;
    this.logHeadPsn = logHeadRecord.Psn;
    this.logHeadRecordOffset = InvalidPhysicalRecordOffset;
    this.logHeadRecord = logHeadRecord;

    this.UpdateApproximateDiskSize();
}
internal EndCheckpointLogRecord(
    BeginCheckpointLogRecord lastCompletedBeginCheckpointRecord,
    IndexingLogRecord logHeadRecord,
    LogicalSequenceNumber lsn,
    PhysicalLogRecord linkedPhysicalRecord)
    : base(LogRecordType.EndCheckpoint, logHeadRecord, lsn, linkedPhysicalRecord)
{
    this.lastCompletedBeginCheckpointRecordOffset = InvalidPhysicalRecordOffset;
    this.lastStableLsn = lastCompletedBeginCheckpointRecord.LastStableLsn;
    this.lastCompletedBeginCheckpointRecord = lastCompletedBeginCheckpointRecord;

    Utility.Assert(
        this.lastStableLsn >= this.lastCompletedBeginCheckpointRecord.Lsn,
        "this.lastStableLsn >= this.lastCompletedBeginCheckpointRecord.Lsn. Last stable lsn is: {0} and last completed checkpoint record is {1}",
        this.lastStableLsn.LSN,
        this.lastCompletedBeginCheckpointRecord.Lsn.LSN);

    this.UpdateApproximateDiskSize();
}
/// <summary>
/// Initializes a new instance of the LogRecordsMap class for restore.
/// </summary>
/// <param name="indexingLogRecord">The starting indexing log record.</param>
/// <param name="tracer"></param>
public LogRecordsMap(
    IndexingLogRecord indexingLogRecord,
    ITracer tracer)
{
    this.mode = Mode.Restore;

    this.LastLogicalSequenceNumber = indexingLogRecord.Lsn;
    this.TransactionsMap = new TransactionMap();
    this.CurrentLogTailEpoch = indexingLogRecord.CurrentEpoch;
    this.LastRecoveredAtomicRedoOperationLsn = long.MinValue;
    this.LastStableLsn = LogicalSequenceNumber.InvalidLsn;
    this.LastPhysicalRecord = PhysicalLogRecord.InvalidPhysicalLogRecord;
    this.ProgressVector = ProgressVector.ZeroProgressVector;
    this.firstIndexingLogRecord = indexingLogRecord;
    this.tracer = tracer;
}
public void Reuse(
    ProgressVector progressVector,
    EndCheckpointLogRecord lastCompletedEndCheckpointRecord,
    BeginCheckpointLogRecord lastInProgressBeginCheckpointRecord,
    PhysicalLogRecord lastLinkedPhysicalRecord,
    InformationLogRecord lastInformationRecord,
    IndexingLogRecord currentLogHeadRecord,
    Epoch tailEpoch,
    LogicalSequenceNumber tailLsn)
{
    this.LastInProgressCheckpointRecord = lastInProgressBeginCheckpointRecord;
    Utility.Assert(this.LastInProgressCheckpointRecord == null, "ReInitialize of ReplicatedLogManager must have null in progress checkpoint");

    this.LastInProgressTruncateHeadRecord = null;
    this.ProgressVector = progressVector;
    this.LastCompletedEndCheckpointRecord = lastCompletedEndCheckpointRecord;
    this.LastLinkedPhysicalRecord = lastLinkedPhysicalRecord;
    this.CurrentLogHeadRecord = currentLogHeadRecord;
    this.LastInformationRecord = lastInformationRecord;
    this.CurrentLogTailEpoch = tailEpoch;
    this.CurrentLogTailLsn = tailLsn;
}
internal override bool FreePreviousLinksLowerThanPsn(PhysicalSequenceNumber newHeadPsn)
{
    bool ret = base.FreePreviousLinksLowerThanPsn(newHeadPsn);

    if (this.logHeadRecord != null &&
        this.logHeadRecord.Psn < newHeadPsn)
    {
        Utility.Assert(
            this.logHeadRecordOffset != InvalidPhysicalRecordOffset,
            "this.logHeadRecordOffset must be valid");

        this.logHeadRecord = (this.logHeadRecordOffset == 0)
            ? null
            : IndexingLogRecord.InvalidIndexingLogRecord;

        return true;
    }

    return ret;
}
public LoggingReplicatorCopyContext(
    long replicaId,
    ProgressVector progressVector,
    IndexingLogRecord logHeadRecord,
    LogicalSequenceNumber logTailLsn,
    long latestrecoveredAtomicRedoOperationLsn)
{
    using (var stream = new MemoryStream())
    {
        using (var bw = new BinaryWriter(stream))
        {
            bw.Write(replicaId);
            progressVector.Write(bw);
            bw.Write(logHeadRecord.CurrentEpoch.DataLossNumber);
            bw.Write(logHeadRecord.CurrentEpoch.ConfigurationNumber);
            bw.Write(logHeadRecord.Lsn.LSN);
            bw.Write(logTailLsn.LSN);
            bw.Write(latestrecoveredAtomicRedoOperationLsn);

            this.copyData = new OperationData(stream.ToArray());
            this.isDone = false;
        }
    }
}
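// The constructor above fixes the byte layout of the copy context: replica id, progress
// vector, head epoch (data loss and configuration numbers), head LSN, tail LSN, and the
// latest recovered atomic-redo LSN. The reader below is a minimal sketch of consuming
// that layout with plain primitives; it is hypothetical and skips over the progress
// vector rather than parsing it.
using System.IO;

internal sealed class CopyContextSketch
{
    public long ReplicaId;
    public long DataLossNumber;
    public long ConfigurationNumber;
    public long LogHeadLsn;
    public long LogTailLsn;
    public long LatestRecoveredAtomicRedoOperationLsn;

    internal static CopyContextSketch Read(byte[] copyData, int progressVectorByteCount)
    {
        using (var stream = new MemoryStream(copyData))
        using (var br = new BinaryReader(stream))
        {
            var result = new CopyContextSketch { ReplicaId = br.ReadInt64() };

            // ProgressVector.Write produced this region; here we only skip it, assuming
            // the caller knows its serialized size.
            stream.Position += progressVectorByteCount;

            result.DataLossNumber = br.ReadInt64();
            result.ConfigurationNumber = br.ReadInt64();
            result.LogHeadLsn = br.ReadInt64();
            result.LogTailLsn = br.ReadInt64();
            result.LatestRecoveredAtomicRedoOperationLsn = br.ReadInt64();
            return result;
        }
    }
}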
internal override async Task<IndexingLogRecord> CreateCopyLogAsync(
    Epoch startingEpoch,
    LogicalSequenceNumber startingLsn)
{
    var flushCallback = this.PhysicalLogWriter.CallbackManager.Callback;
    await this.CloseCurrentLogAsync().ConfigureAwait(false);

    this.CurrentLogFileAlias = this.BaseLogFileAlias + CopySuffix;
    this.LogicalLog = await this.CreateLogFileAsync(true, CancellationToken.None).ConfigureAwait(false);

    var callbackManager = new PhysicalLogWriterCallbackManager(
        flushCallback,
        this.Tracer);

    this.PhysicalLogWriter = new PhysicalLogWriter(
        this.LogicalLog,
        callbackManager,
        this.Tracer,
        this.MaxWriteCacheSizeInMB,
        this.IncomingBytesRateCounterWriter,
        this.LogFlushBytesRateCounterWriter,
        this.BytesPerFlushCounterWriter,
        this.AvgFlushLatencyCounterWriter,
        this.AvgSerializationLatencyCounterWriter,
        false);

    var firstIndexingRecord = new IndexingLogRecord(startingEpoch, startingLsn, null);
    this.PhysicalLogWriter.InsertBufferedRecord(firstIndexingRecord);

    await this.PhysicalLogWriter.FlushAsync("CreateCopyLogAsync").ConfigureAwait(false);

    this.LogHeadRecordPosition = firstIndexingRecord.RecordPosition;
    return firstIndexingRecord;
}
internal static EndCheckpointLogRecord CreateOneEndCheckpointLogRecord(
    BeginCheckpointLogRecord lastCompletedBeginCheckpointRecord,
    IndexingLogRecord logHeadRecord)
{
    return new EndCheckpointLogRecord(lastCompletedBeginCheckpointRecord, logHeadRecord);
}
private EndCheckpointLogRecord(
    BeginCheckpointLogRecord lastCompletedBeginCheckpointRecord,
    IndexingLogRecord logHeadRecord)
    : this(lastCompletedBeginCheckpointRecord, logHeadRecord, LogicalSequenceNumber.OneLsn, null)
{
}
private static LogRecord ReadRecord(BinaryReader br, ulong recordPosition, bool isPhysicalRead)
{
    LogRecord record;
    var lsn = LogicalSequenceNumber.InvalidLsn.LSN;
    LogRecordType recordType;

    // Metadata section.
    var startingPosition = br.BaseStream.Position;
    var sizeOfSection = br.ReadInt32();
    var endPosition = startingPosition + sizeOfSection;

    // Read logical metadata.
    recordType = (LogRecordType)br.ReadUInt32();

    switch (recordType)
    {
        case LogRecordType.BeginTransaction:
            record = new BeginTransactionOperationLogRecord(recordType, recordPosition, lsn);
            break;

        case LogRecordType.Operation:
            record = new OperationLogRecord(recordType, recordPosition, lsn);
            break;

        case LogRecordType.EndTransaction:
            record = new EndTransactionLogRecord(recordType, recordPosition, lsn);
            break;

        case LogRecordType.Barrier:
            record = new BarrierLogRecord(recordType, recordPosition, lsn);
            break;

        case LogRecordType.UpdateEpoch:
            record = new UpdateEpochLogRecord(recordType, recordPosition, lsn);
            break;

        case LogRecordType.Backup:
            record = new BackupLogRecord(recordType, recordPosition, lsn);
            break;

        case LogRecordType.BeginCheckpoint:
            record = new BeginCheckpointLogRecord(recordType, recordPosition, lsn);
            break;

        case LogRecordType.EndCheckpoint:
            record = new EndCheckpointLogRecord(recordType, recordPosition, lsn);
            break;

        case LogRecordType.Indexing:
            record = new IndexingLogRecord(recordType, recordPosition, lsn);
            break;

        case LogRecordType.TruncateHead:
            record = new TruncateHeadLogRecord(recordType, recordPosition, lsn);
            break;

        case LogRecordType.TruncateTail:
            record = new TruncateTailLogRecord(recordType, recordPosition, lsn);
            break;

        case LogRecordType.Information:
            record = new InformationLogRecord(recordType, recordPosition, lsn);
            break;

        case LogRecordType.CompleteCheckpoint:
            record = new CompleteCheckpointLogRecord(recordType, recordPosition, lsn);
            break;

        default:
            Utility.CodingError("Unexpected record type {0}", recordType);
            return null;
    }

    record.lsn = new LogicalSequenceNumber(br.ReadInt64());

    // Jump to the end of the section, ignoring fields that are not understood.
    Utility.Assert(endPosition >= br.BaseStream.Position, "Could not have read more than section size.");
    br.BaseStream.Position = endPosition;

    record.Read(br, isPhysicalRead);

    return record;
}
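// ReadRecord above expects a self-describing metadata section: an Int32 section size
// (which covers the size field itself), a UInt32 record type, and an Int64 LSN, after
// which the reader jumps to the end of the section so unknown trailing fields are
// tolerated. The writer below is a hypothetical counterpart that produces that framing;
// it is a sketch, not the replicator's serializer.
using System.IO;

internal static class RecordFramingSketch
{
    internal static void WriteLogicalMetadata(BinaryWriter bw, uint recordType, long lsn)
    {
        long sizePosition = bw.BaseStream.Position;
        bw.Write(0);          // Placeholder for the section size, patched below.
        bw.Write(recordType); // Matches br.ReadUInt32() in ReadRecord.
        bw.Write(lsn);        // Matches br.ReadInt64() in ReadRecord.

        // Patch the section size so a reader can compute endPosition = startingPosition + sizeOfSection
        // and skip any fields it does not understand.
        long endPosition = bw.BaseStream.Position;
        bw.BaseStream.Position = sizePosition;
        bw.Write((int)(endPosition - sizePosition));
        bw.BaseStream.Position = endPosition;
    }
}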
internal async Task<PhysicalLogReader> OpenWithRestoreFileAsync(
    ReplicaOpenMode openMode,
    FabricPerformanceCounterSetInstance perfCounterInstance,
    IList<string> backupLogFilePathList,
    int flushEveryNKB)
{
    this.LogicalLog = await this.CreateLogFileAsync(openMode == ReplicaOpenMode.New, CancellationToken.None).ConfigureAwait(false);

    // No usable content in the log
    if (this.LogicalLog.WritePosition > 0)
    {
        await this.LogicalLog.TruncateTail(0, CancellationToken.None).ConfigureAwait(false); // Remove all contents and reset write cursor back to 0

        Utility.Assert(
            this.LogicalLog.Length == 0 && this.LogicalLog.WritePosition == 0,
            "{0}: this.logicalLog.Length: {1} this.logicalLog.WritePosition: {2}",
            this.tracer.Type,
            this.LogicalLog.Length,
            this.LogicalLog.WritePosition);
    }

    using (PhysicalLogWriter logWriter = new PhysicalLogWriter(
        this.LogicalLog,
        this.emptyCallbackManager,
        this.Tracer,
        this.MaxWriteCacheSizeInMB,
        this.IncomingBytesRateCounterWriter,
        this.LogFlushBytesRateCounterWriter,
        this.BytesPerFlushCounterWriter,
        this.AvgFlushLatencyCounterWriter,
        this.AvgSerializationLatencyCounterWriter,
        true))
    {
        LogRecord record = null;
        LogRecordsMap logRecordsMap = null;
        long bufferedRecordsSizeBytes = -1;
        long backupRecordIndex = 0;

        foreach (string backupLogFilePath in backupLogFilePathList)
        {
            BackupLogFile backupLogFile = await BackupLogFile.OpenAsync(
                backupLogFilePath,
                CancellationToken.None).ConfigureAwait(false);

            using (var backupLogEnumerator = backupLogFile.GetAsyncEnumerator())
            {
                if (logRecordsMap == null)
                {
                    bool hasFirstRecord = await backupLogEnumerator.MoveNextAsync(CancellationToken.None).ConfigureAwait(false);
                    Utility.Assert(hasFirstRecord, "{0}: Backup must include at least six records.", this.tracer.Type);

                    // If the log is being restored,
                    // the first record must be an indexing log record. Flush it.
                    LogRecord firstRecordFromBackupLog = backupLogEnumerator.Current;
                    Utility.Assert(
                        null != firstRecordFromBackupLog,
                        "{0}: BackupLogEnumerator will never return null",
                        this.tracer.Type);
                    Utility.Assert(
                        false == LogRecord.IsInvalidRecord(firstRecordFromBackupLog),
                        "{0}: First record read from the backup log cannot be invalid",
                        this.tracer.Type);

                    IndexingLogRecord logHead = firstRecordFromBackupLog as IndexingLogRecord;
                    Utility.Assert(
                        null != logHead,
                        "{0}: First record read from the backup log must be indexing log record: Type: {1} LSN: {2} PSN: {3} Position: {4}",
                        this.tracer.Type,
                        firstRecordFromBackupLog.RecordType,
                        firstRecordFromBackupLog.Lsn.LSN,
                        firstRecordFromBackupLog.Psn.PSN,
                        firstRecordFromBackupLog.RecordPosition);

                    logRecordsMap = new LogRecordsMap(logHead, this.Tracer);
                    logRecordsMap.ProcessLogRecord(logHead);

                    bufferedRecordsSizeBytes = logWriter.InsertBufferedRecord(logHead); // Note that logHead.PreviousPhysicalRecord is an InvalidLogRecord.

                    backupRecordIndex++;
                    FabricEvents.Events.RestoreOperationAsync(
                        this.Tracer.Type,
                        logHead.RecordType.ToString(),
                        backupRecordIndex,
                        logHead.Lsn.LSN,
                        logHead.Psn.PSN,
                        long.MaxValue);
                }

                while (await backupLogEnumerator.MoveNextAsync(CancellationToken.None).ConfigureAwait(false))
                {
                    record = backupLogEnumerator.Current;
                    logRecordsMap.ProcessLogRecord(record);

                    // Note: InsertBufferedRecord adds the record to the buffer, where it waits to be flushed, and returns
                    // the new size of the whole buffer.
                    bufferedRecordsSizeBytes = logWriter.InsertBufferedRecord(record);

                    backupRecordIndex++;
                    FabricEvents.Events.RestoreOperationAsync(
                        this.Tracer.Type,
                        record.RecordType.ToString(),
                        backupRecordIndex,
                        record.Lsn.LSN,
                        record.Psn.PSN,
                        record.PreviousPhysicalRecord.Psn.PSN);

                    // TODO: Use a backup config for this flush size determination
                    if (bufferedRecordsSizeBytes > flushEveryNKB * 1024)
                    {
                        string intermediateFlushMessage = string.Format(
                            CultureInfo.InvariantCulture,
                            "LogManager: OpenWithRestoreFile (Restore) Intermediate Flush Size: {0} bytes, Index: {1}",
                            bufferedRecordsSizeBytes,
                            backupRecordIndex);

                        await logWriter.FlushAsync(intermediateFlushMessage).ConfigureAwait(false);

                        // This additional await is required to ensure the log record was indeed flushed.
                        // Without this, FlushAsync could succeed, but the log record flush could have failed due to a write error.
                        await record.AwaitFlush().ConfigureAwait(false);

                        bufferedRecordsSizeBytes = 0;
                    }
                }
            }
        }

        // Note: The last flush for remaining items is kept outside the loop to avoid unnecessary flushes in the
        // case where each iteration has only a small number of un-flushed log records.
        if (bufferedRecordsSizeBytes > 0)
        {
            string flushMessage = string.Format(
                CultureInfo.InvariantCulture,
                "LogManager: OpenWithRestoreFile (Restore)");

            await logWriter.FlushAsync(flushMessage).ConfigureAwait(false);

            // This additional await is required to ensure the log record was indeed flushed.
            // Without this, FlushAsync could succeed, but the log record flush could have failed due to a write error.
            await record.AwaitFlush().ConfigureAwait(false);
        }
    }

    return new PhysicalLogReader(this);
}
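// OpenWithRestoreFileAsync batches restored records and flushes whenever the buffered
// size crosses flushEveryNKB kilobytes, with one final flush for any remainder after the
// loop. The following is a minimal, self-contained sketch of that batching rule; the
// record representation and flush delegate are hypothetical stand-ins for the log writer.
using System;
using System.Collections.Generic;

internal static class RestoreBatchingSketch
{
    // Returns the number of flushes issued for the given records and threshold.
    internal static int FlushInBatches(IEnumerable<byte[]> records, int flushEveryNKB, Action<long> flush)
    {
        long bufferedBytes = 0;
        int flushCount = 0;

        foreach (byte[] record in records)
        {
            bufferedBytes += record.Length;

            if (bufferedBytes > (long)flushEveryNKB * 1024)
            {
                flush(bufferedBytes); // Intermediate flush, mirroring the mid-loop FlushAsync above.
                bufferedBytes = 0;
                flushCount++;
            }
        }

        if (bufferedBytes > 0)
        {
            flush(bufferedBytes); // Final flush for the remainder, issued once outside the loop.
            flushCount++;
        }

        return flushCount;
    }
}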