Example #1
        private static LogRecord ReadFromOperationData(OperationData operationData)
        {
            LogRecord     record;
            long          lsn;
            const ulong   RecordPosition = InvalidRecordPosition;
            LogRecordType recordType;
            var           index = -1;

            using (var reader = new BinaryReader(IncrementIndexAndGetMemoryStreamAt(operationData, ref index)))
            {
                // Logical metadata section.
                var startingPosition = reader.BaseStream.Position;
                var sizeOfSection    = reader.ReadInt32();
                var endPosition      = startingPosition + sizeOfSection;

                // Logical metadata read.
                recordType = (LogRecordType)reader.ReadUInt32();
                lsn        = reader.ReadInt64();

                // Jump to the end of the section, ignoring fields that are not understood.
                Utility.Assert(endPosition >= reader.BaseStream.Position, "Could not have read more than section size.");
                reader.BaseStream.Position = endPosition;
            }

            switch (recordType)
            {
            case LogRecordType.BeginTransaction:
                record = new BeginTransactionOperationLogRecord(recordType, RecordPosition, lsn);
                break;

            case LogRecordType.Operation:
                record = new OperationLogRecord(recordType, RecordPosition, lsn);
                break;

            case LogRecordType.EndTransaction:
                record = new EndTransactionLogRecord(recordType, RecordPosition, lsn);
                break;

            case LogRecordType.Barrier:
                record = new BarrierLogRecord(recordType, RecordPosition, lsn);
                break;

            case LogRecordType.UpdateEpoch:
                record = new UpdateEpochLogRecord(recordType, RecordPosition, lsn);
                break;

            case LogRecordType.Backup:
                record = new BackupLogRecord(recordType, RecordPosition, lsn);
                break;

            default:
                Utility.CodingError(
                    "Unexpected record type received during replication/copy processing {0}",
                    recordType);
                return null;
            }

            record.ReadLogical(operationData, ref index);
            return record;
        }
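The deserialization above depends on an IncrementIndexAndGetMemoryStreamAt helper that is not part of this excerpt. A minimal sketch of such a helper is shown below, assuming OperationData exposes an indexer over ArraySegment<byte> segments; it is an illustration, not the replicator's actual implementation.

        // Hypothetical reconstruction of the helper used above; assumes OperationData is indexable over ArraySegment<byte>.
        private static MemoryStream IncrementIndexAndGetMemoryStreamAt(OperationData operationData, ref int index)
        {
            // Advance to the next segment of the operation data.
            index++;
            var segment = operationData[index];

            // Wrap the segment in a read-only stream so the caller can layer a BinaryReader on top of it.
            return new MemoryStream(segment.Array, segment.Offset, segment.Count, writable: false);
        }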
Example #2
        /// <summary>
        /// Process the backup log record.
        /// </summary>
        /// <param name="backupLogRecord">The backupLogRecord record to be processed.</param>
        /// <param name="isRecoverableRecord">Is this a recoverable record.</param>
        private void ProcessLogRecord(BackupLogRecord backupLogRecord, out bool isRecoverableRecord)
        {
            // For incremental backup chaining, we need to recover the backup log records.
            isRecoverableRecord = true;

            this.LastLogicalSequenceNumber++;

            Utility.Assert(
                backupLogRecord.Lsn == this.LastLogicalSequenceNumber,
                "backupLogRecord.LastLogicalSequenceNumber == lsn");
        }
Example #3
        public IncrementalBackupLogRecordAsyncEnumerator(
            string traceType,
            Guid backupId,
            IAsyncEnumerator<LogRecord> source,
            BackupLogRecord lastBackupLogRecord,
            IBackupRestoreProvider loggingReplicator)
        {
            this.traceType           = traceType;
            this.backupId            = backupId;
            this.source              = source;
            this.lastBackupLogRecord = lastBackupLogRecord;
            this.loggingReplicator   = loggingReplicator;
            this.startingEpoch       = LoggingReplicator.InvalidEpoch;
            this.startingLsn         = LogicalSequenceNumber.InvalidLsn;
            this.lastEpoch           = LoggingReplicator.InvalidEpoch;
            this.lastLsn             = LogicalSequenceNumber.InvalidLsn;

            this.count = 0;
        }
Example #4
        internal IncrementalBackupLogRecordAsyncEnumerator(
            string traceType,
            Guid backupId,
            IAsyncEnumerator<LogRecord> source,
            BackupLogRecord lastBackupLogRecord,
            Epoch startingEpoch,
            LogicalSequenceNumber startingLsn)
        {
            this.traceType           = traceType;
            this.backupId            = backupId;
            this.source              = source;
            this.lastBackupLogRecord = lastBackupLogRecord;
            this.loggingReplicator   = null;

            // TODO: Verify that startingEpoch and startingLsn are not invalid.
            this.startingEpoch = startingEpoch;
            this.startingLsn   = startingLsn;
            this.lastEpoch     = LoggingReplicator.InvalidEpoch;
            this.lastLsn       = LogicalSequenceNumber.InvalidLsn;

            this.count = 0;
        }
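Unlike the public constructor above, which keeps a reference to the logging replicator and initializes the starting epoch and LSN to invalid sentinels, this internal overload supplies them explicitly and leaves loggingReplicator null. For illustration, a call might look like the sketch below; the variable names are assumptions and not part of the excerpt.

        // Hypothetical call site -- logRecordSource, lastCompletedBackupLogRecord, knownStartingEpoch and knownStartingLsn are illustrative names.
        var enumerator = new IncrementalBackupLogRecordAsyncEnumerator(
            traceType,
            Guid.NewGuid(),
            logRecordSource,
            lastCompletedBackupLogRecord,
            knownStartingEpoch,
            knownStartingLsn);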
Example #5
        /// <summary>
        /// Initializes a new instance of the BeginCheckpointLogRecord class.
        /// </summary>
        /// <remarks>Called when the replicator decides to checkpoint.</remarks>
        internal BeginCheckpointLogRecord(
            bool isFirstCheckpointOnFullCopy,
            ProgressVector progressVector,
            BeginTransactionOperationLogRecord earliestPendingTransaction,
            Epoch headEpoch,
            Epoch epoch,
            LogicalSequenceNumber lsn,
            PhysicalLogRecord lastLinkedPhysicalRecord,
            BackupLogRecord lastCompletedBackupLogRecord,
            uint progressVectorMaxEntries,
            long periodicCheckpointTimeTicks,
            long periodicTruncationTimeTicks)
            : base(LogRecordType.BeginCheckpoint, lsn, lastLinkedPhysicalRecord)
        {
            this.IsFirstCheckpointOnFullCopy = isFirstCheckpointOnFullCopy;
            this.progressVector = ProgressVector.Clone(progressVector, progressVectorMaxEntries, lastCompletedBackupLogRecord.HighestBackedUpEpoch, headEpoch);

            this.earliestPendingTransactionOffset = LogicalLogRecord.InvalidLogicalRecordOffset;
            this.earliestPendingTransaction       = earliestPendingTransaction;
            this.checkpointState = CheckpointState.Invalid;
            this.lastStableLsn   = LogicalSequenceNumber.InvalidLsn;
            this.epoch           = (earliestPendingTransaction != null) ? earliestPendingTransaction.RecordEpoch : epoch;

            // Initialize backup log record fields.
            this.highestBackedUpEpoch = lastCompletedBackupLogRecord.HighestBackedUpEpoch;
            this.highestBackedUpLsn   = lastCompletedBackupLogRecord.HighestBackedUpLsn;

            this.backupLogRecordCount = lastCompletedBackupLogRecord.BackupLogRecordCount;
            this.backupLogSize        = lastCompletedBackupLogRecord.BackupLogSizeInKB;

            this.earliestPendingTransactionInvalidated = 0;

            this.lastPeriodicCheckpointTimeTicks = periodicCheckpointTimeTicks;
            this.lastPeriodicTruncationTimeTicks = periodicTruncationTimeTicks;
            this.UpdateApproximateDiskSize();
        }
Example #6
        /// <summary>
        /// Create a new <see cref="BackupLogFile"/> and write it to the given file.
        /// </summary>
        /// <param name="fileName">Name of the backup file.</param>
        /// <param name="logRecords">The log records.</param>
        /// <param name="lastBackupLogRecord"></param>
        /// <param name="cancellationToken">Token used to signal cancellation.</param>
        /// <returns>The new <see cref="BackupLogFile"/>.</returns>
        public static async Task<BackupLogFile> CreateAsync(
            string fileName,
            IAsyncEnumerator<LogRecord> logRecords,
            BackupLogRecord lastBackupLogRecord,
            CancellationToken cancellationToken)
        {
            var stopwatch = new Stopwatch();

            stopwatch.Start();

            var backupLogFile = new BackupLogFile(fileName);

            backupLogFile.properties = new BackupLogFileProperties();

            // Create the file with the asynchronous flag and a 4096-byte buffer size (the .NET default).
            using (var filestream = FabricFile.Open(
                       fileName,
                       FileMode.CreateNew,
                       FileAccess.Write,
                       FileShare.Write,
                       4096,
                       FileOptions.Asynchronous))
            {
                Utility.SetIoPriorityHint(filestream.SafeFileHandle, Kernel32Types.PRIORITY_HINT.IoPriorityHintLow);

                var incrementalBackupRecords = logRecords as IncrementalBackupLogRecordAsyncEnumerator;
                if (incrementalBackupRecords == null)
                {
                    await backupLogFile.WriteLogRecordsAsync(filestream, logRecords, cancellationToken).ConfigureAwait(false);
                }
                else
                {
                    await backupLogFile.WriteLogRecordsAsync(filestream, incrementalBackupRecords, cancellationToken).ConfigureAwait(false);

                    await incrementalBackupRecords.VerifyDrainedAsync().ConfigureAwait(false);

                    Utility.Assert(backupLogFile.Count == incrementalBackupRecords.Count, "Unexpected count");

                    backupLogFile.properties.IndexingRecordEpoch = incrementalBackupRecords.StartingEpoch;
                    backupLogFile.properties.IndexingRecordLsn   = incrementalBackupRecords.StartingLsn;

                    if (incrementalBackupRecords.HighestBackedupEpoch == LoggingReplicator.InvalidEpoch)
                    {
                        backupLogFile.properties.LastBackedUpEpoch = lastBackupLogRecord.HighestBackedUpEpoch;
                    }
                    else
                    {
                        backupLogFile.properties.LastBackedUpEpoch = incrementalBackupRecords.HighestBackedupEpoch;
                    }

                    backupLogFile.properties.LastBackedUpLsn = incrementalBackupRecords.HighestBackedupLsn;
                }

                // Write the log records.
                backupLogFile.properties.RecordsHandle = new BlockHandle(offset: 0, size: filestream.Position);

                cancellationToken.ThrowIfCancellationRequested();

                // Write the properties.
                var propertiesHandle =
                    await FileBlock.WriteBlockAsync(filestream, writer => backupLogFile.properties.Write(writer)).ConfigureAwait(false);

                cancellationToken.ThrowIfCancellationRequested();

                // Write the footer.
                backupLogFile.footer = new FileFooter(propertiesHandle, Version);
                await FileBlock.WriteBlockAsync(filestream, writer => backupLogFile.footer.Write(writer)).ConfigureAwait(false);

                cancellationToken.ThrowIfCancellationRequested();

                // Store the size.
                backupLogFile.Size = filestream.Length;
            }

            stopwatch.Stop();
            backupLogFile.WriteTimeInMilliseconds = stopwatch.ElapsedMilliseconds;

            return backupLogFile;
        }
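A hypothetical call site for CreateAsync could look like the following sketch; the file path, enumerator, and record variables are illustrative assumptions, not part of the excerpt.

        // Hypothetical call site -- backupLogFilePath, logRecordsEnumerator and lastCompletedBackupLogRecord are illustrative names.
        BackupLogFile logFile = await BackupLogFile.CreateAsync(
            backupLogFilePath,
            logRecordsEnumerator,
            lastCompletedBackupLogRecord,
            cancellationToken).ConfigureAwait(false);

        // The returned instance records the on-disk size and write duration set above,
        // assuming those properties are readable by the caller.
        long sizeInBytes = logFile.Size;
        long writeTimeMs = logFile.WriteTimeInMilliseconds;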
Example #7
        private static LogRecord ReadRecord(BinaryReader br, ulong recordPosition, bool isPhysicalRead)
        {
            LogRecord     record;
            var           lsn = LogicalSequenceNumber.InvalidLsn.LSN;
            LogRecordType recordType;

            // Metadata section.
            var startingPosition = br.BaseStream.Position;
            var sizeOfSection    = br.ReadInt32();
            var endPosition      = startingPosition + sizeOfSection;

            // Read the logical metadata.
            recordType = (LogRecordType)br.ReadUInt32();

            switch (recordType)
            {
            case LogRecordType.BeginTransaction:
                record = new BeginTransactionOperationLogRecord(recordType, recordPosition, lsn);
                break;

            case LogRecordType.Operation:
                record = new OperationLogRecord(recordType, recordPosition, lsn);
                break;

            case LogRecordType.EndTransaction:
                record = new EndTransactionLogRecord(recordType, recordPosition, lsn);
                break;

            case LogRecordType.Barrier:
                record = new BarrierLogRecord(recordType, recordPosition, lsn);
                break;

            case LogRecordType.UpdateEpoch:
                record = new UpdateEpochLogRecord(recordType, recordPosition, lsn);
                break;

            case LogRecordType.Backup:
                record = new BackupLogRecord(recordType, recordPosition, lsn);
                break;

            case LogRecordType.BeginCheckpoint:
                record = new BeginCheckpointLogRecord(recordType, recordPosition, lsn);
                break;

            case LogRecordType.EndCheckpoint:
                record = new EndCheckpointLogRecord(recordType, recordPosition, lsn);
                break;

            case LogRecordType.Indexing:
                record = new IndexingLogRecord(recordType, recordPosition, lsn);
                break;

            case LogRecordType.TruncateHead:
                record = new TruncateHeadLogRecord(recordType, recordPosition, lsn);
                break;

            case LogRecordType.TruncateTail:
                record = new TruncateTailLogRecord(recordType, recordPosition, lsn);
                break;

            case LogRecordType.Information:
                record = new InformationLogRecord(recordType, recordPosition, lsn);
                break;

            case LogRecordType.CompleteCheckpoint:
                record = new CompleteCheckpointLogRecord(recordType, recordPosition, lsn);
                break;

            default:
                Utility.CodingError("Unexpected record type {0}", recordType);
                return null;
            }

            record.lsn = new LogicalSequenceNumber(br.ReadInt64());

            // Jump to the end of the section, ignoring fields that are not understood.
            Utility.Assert(endPosition >= br.BaseStream.Position, "Could not have read more than section size.");
            br.BaseStream.Position = endPosition;

            record.Read(br, isPhysicalRead);

            return record;
        }