Code example #1
        /// <summary>
        /// Initializes a new instance of the BeginCheckpointLogRecord class.
        /// </summary>
        /// <remarks>Only used for generating invalid BeginCheckpointLogRecord.</remarks>
        private BeginCheckpointLogRecord()
        {
            this.IsFirstCheckpointOnFullCopy = false;
            this.progressVector = null;
            this.earliestPendingTransactionOffset = LogicalLogRecord.InvalidLogicalRecordOffset;
            this.earliestPendingTransaction       = BeginTransactionOperationLogRecord.InvalidBeginTransactionLogRecord;
            this.checkpointState = CheckpointState.Invalid;
            this.lastStableLsn   = LogicalSequenceNumber.InvalidLsn;
            this.epoch           = LogicalSequenceNumber.InvalidEpoch;

            // Initializes the backup information to invalid.
            this.highestBackedUpEpoch = new Epoch(
                LogicalSequenceNumber.InvalidLsn.LSN,
                LogicalSequenceNumber.InvalidLsn.LSN);
            this.highestBackedUpLsn = LogicalSequenceNumber.InvalidLsn;

            // Uint.MaxValue is used to indicate invalid. 4,294,967,295 log records, 4.294967295 TB.
            this.backupLogRecordCount = uint.MaxValue;
            this.backupLogSize        = uint.MaxValue;

            this.earliestPendingTransactionInvalidated = 0;

            this.lastPeriodicCheckpointTimeTicks = 0;
            this.lastPeriodicTruncationTimeTicks = 0;
        }
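A note on the figures in the comment above: uint.MaxValue is 4,294,967,295, which gives the record-count bound directly, and 4,294,967,295 KB is exactly 4.294967295 TB in decimal units, which suggests (though this excerpt does not state it) that backupLogSize is tracked in kilobytes.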
Code example #2
File: LogHeadRecord.cs Project: zmyer/service-fabric
        protected override void Read(BinaryReader br, bool isPhysicalRead)
        {
            base.Read(br, isPhysicalRead);

            var startingPosition = br.BaseStream.Position;
            var sizeOfSection    = br.ReadInt32();
            var endPosition      = startingPosition + sizeOfSection;

            var dataLossNumber      = br.ReadInt64();
            var configurationNumber = br.ReadInt64();

            this.logHeadEpoch = new Epoch(dataLossNumber, configurationNumber);
            this.logHeadLsn   = new LogicalSequenceNumber(br.ReadInt64());
            this.logHeadPsn   = new PhysicalSequenceNumber(br.ReadInt64());
            if (isPhysicalRead == true)
            {
                this.logHeadRecordOffset = br.ReadUInt64();
            }

            // Jump to the end of the section ignoring fields that are not understood.
            Utility.Assert(endPosition >= br.BaseStream.Position, "Could not have read more than section size.");
            br.BaseStream.Position = endPosition;

            this.UpdateApproximateDiskSize();
            return;
        }
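The Read override above illustrates the size-prefixed section pattern that recurs throughout these records: an Int32 section size (which, as the position arithmetic implies, includes the length field itself) is read first, the known fields follow, and the stream position is then moved to the section end so that any fields appended by a newer writer are simply skipped. A minimal self-contained sketch of the pattern, using hypothetical field names rather than the Service Fabric types, might look like this:

        using System.IO;

        internal static class SectionedSerializationSketch
        {
            // Writer sketch: reserve space for the Int32 size, write the payload, then back-patch the size.
            // The size covers the length prefix itself, matching the position arithmetic in LogHeadRecord.Read.
            internal static void WriteSection(BinaryWriter bw, long headLsn, ulong headRecordOffset)
            {
                var startingPosition = bw.BaseStream.Position;
                bw.Write(0);                                   // placeholder for sizeOfSection
                bw.Write(headLsn);
                bw.Write(headRecordOffset);

                var endPosition   = bw.BaseStream.Position;
                var sizeOfSection = checked((int)(endPosition - startingPosition));
                bw.BaseStream.Position = startingPosition;
                bw.Write(sizeOfSection);                       // back-patch the real size
                bw.BaseStream.Position = endPosition;
            }

            // Reader sketch: consume only the fields this version understands, then jump to the
            // section end, ignoring trailing fields written by a newer version.
            internal static void ReadSection(BinaryReader br, out long headLsn, out ulong headRecordOffset)
            {
                var startingPosition = br.BaseStream.Position;
                var sizeOfSection    = br.ReadInt32();
                var endPosition      = startingPosition + sizeOfSection;

                headLsn          = br.ReadInt64();
                headRecordOffset = br.ReadUInt64();

                br.BaseStream.Position = endPosition;
            }
        }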
Code example #3
 internal ProgressVectorEntry(Epoch epoch, LogicalSequenceNumber lsn, long primaryReplicaId, DateTime timestamp)
 {
     this.epoch            = epoch;
     this.lsn              = lsn;
     this.primaryReplicaId = primaryReplicaId;
     this.timestamp        = timestamp;
 }
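The constructor simply captures its arguments, so building an entry inside the replicator assembly is direct. A small illustrative sketch, with hypothetical values (Epoch takes a data loss number and a configuration number, as in the Read example later in this list):

     // Illustration only; the argument values are hypothetical.
     var entry = new ProgressVectorEntry(
         new Epoch(1, 2),                  // (dataLossNumber, configurationNumber)
         new LogicalSequenceNumber(100),   // lsn
         42,                               // primaryReplicaId
         DateTime.UtcNow);                 // timestamp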
Code example #4
        internal BeginCheckpointLogRecord(LogRecordType recordType, ulong recordPosition, long lsn)
            : base(recordType, recordPosition, lsn)
        {
            Utility.Assert(
                recordType == LogRecordType.BeginCheckpoint,
                "Record type is expected to be begin checkpoint  but the record type is : {0}",
                recordType);
            this.IsFirstCheckpointOnFullCopy = false;
            this.progressVector = null;
            this.earliestPendingTransactionOffset = LogicalLogRecord.InvalidLogicalRecordOffset;
            this.earliestPendingTransaction       = BeginTransactionOperationLogRecord.InvalidBeginTransactionLogRecord;
            this.checkpointState = CheckpointState.Invalid;
            this.lastStableLsn   = LogicalSequenceNumber.InvalidLsn;
            this.epoch           = LogicalSequenceNumber.InvalidEpoch;

            // Initializes the backup information to invalid.
            this.backupId             = BackupLogRecord.InvalidBackupId;
            this.highestBackedUpEpoch = new Epoch(
                LogicalSequenceNumber.InvalidLsn.LSN,
                LogicalSequenceNumber.InvalidLsn.LSN);
            this.highestBackedUpLsn = LogicalSequenceNumber.InvalidLsn;

            // Uint.MaxValue is used to indicate invalid. 4,294,967,295 log records, 4.294967295 TB.
            this.backupLogRecordCount = uint.MaxValue;
            this.backupLogSize        = uint.MaxValue;

            this.earliestPendingTransactionInvalidated = 0;

            this.lastPeriodicCheckpointTimeTicks = 0;
            this.lastPeriodicTruncationTimeTicks = 0;
        }
Code example #5
 public CopyContextParameters(ProgressVector pv, Epoch logHeadEpoch, LogicalSequenceNumber logHeadLsn, LogicalSequenceNumber logTailLsn)
 {
     this.ProgressVector = pv;
     this.LogHeadEpoch   = logHeadEpoch;
     this.LogHeadLsn     = logHeadLsn;
     this.LogTailLsn     = logTailLsn;
 }
Code example #6
        internal void RemoveStableTransactions(LogicalSequenceNumber lastStableLsn)
        {
            lock (this.txmapLock)
            {
                for (var i = this.unstableTransactions.Count - 1; i >= 0; i--)
                {
                    if (this.unstableTransactions[i].Lsn <= lastStableLsn)
                    {
                        for (var j = 0; j <= i; j++)
                        {
                            for (var k = 0; k < this.completedTransactions.Count; k++)
                            {
                                if (this.completedTransactions[k].Transaction
                                    == this.unstableTransactions[j].Transaction)
                                {
                                    this.completedTransactions.RemoveAt(k);
                                    break;
                                }
                            }
                        }

                        this.unstableTransactions.RemoveRange(0, i);
                        break;
                    }
                }
            }
        }
Code example #7
        /// <summary>
        /// Reads the copy metadata from the given operation data.
        /// </summary>
        /// <param name="operationData">
        /// The operation data.
        /// </param>
        /// <returns>
        /// The <see cref="CopyMetadata"/>.
        /// </returns>
        public static CopyMetadata ReadFromOperationData(OperationData operationData)
        {
            var copiedProgressVector = new ProgressVector();

            using (var br = new BinaryReader(new MemoryStream(operationData[0].Array, operationData[0].Offset, operationData[0].Count)))
            {
                var copyStateMetadataVersion = br.ReadInt32();
                copiedProgressVector.Read(br, false);
                var startingEpoch = new Epoch(br.ReadInt64(), br.ReadInt64());
                var startingLsn   = new LogicalSequenceNumber(br.ReadInt64());
                var checkpointLsn = new LogicalSequenceNumber(br.ReadInt64());
                var uptoLsn       = new LogicalSequenceNumber(br.ReadInt64());
                var highestStateProviderCopiedLsn = new LogicalSequenceNumber(br.ReadInt64());

                // Note that if version is 1, then the size must be exactly as expected.
                // Otherwise, the remaining data is not expected to be required.
                Utility.Assert(
                    copyStateMetadataVersion != 1 || operationData[0].Array.Length == br.BaseStream.Position,
                    "Unexpected copy state metadata size. Version {0} logMetadata.Length {1} Position {2}",
                    copyStateMetadataVersion, operationData[0].Array.Length, br.BaseStream.Position);

                return(new CopyMetadata(
                           copyStateMetadataVersion,
                           copiedProgressVector,
                           startingEpoch,
                           startingLsn,
                           checkpointLsn,
                           uptoLsn,
                           highestStateProviderCopiedLsn));
            }
        }
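The reads above define the copy-metadata layout: an Int32 version, the progress vector, two Int64s for the starting epoch, and one Int64 each for the starting, checkpoint, upto, and highest-state-provider-copied LSNs. The matching writer does not appear in this excerpt; the sketch below simply mirrors the reads, and the CopyMetadata property names, the Epoch property names, and the ProgressVector.Write call are all assumptions rather than source APIs.

        // Sketch only: mirrors ReadFromOperationData above. Property names and ProgressVector.Write are assumed.
        internal static ArraySegment<byte> WriteToOperationData(CopyMetadata metadata)
        {
            var stream = new MemoryStream();
            using (var bw = new BinaryWriter(stream))
            {
                bw.Write(metadata.CopyStateMetadataVersion);          // Int32 version
                metadata.ProgressVector.Write(bw);                    // assumed counterpart of ProgressVector.Read
                bw.Write(metadata.StartingEpoch.DataLossNumber);      // epoch as two Int64s
                bw.Write(metadata.StartingEpoch.ConfigurationNumber);
                bw.Write(metadata.StartingLsn.LSN);
                bw.Write(metadata.CheckpointLsn.LSN);
                bw.Write(metadata.UptoLsn.LSN);
                bw.Write(metadata.HighestStateProviderCopiedLsn.LSN);
            }

            return new ArraySegment<byte>(stream.ToArray());          // ToArray is valid on a closed MemoryStream
        }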
Code example #8
 internal LoggingReplicatorCopyStream(
     ReplicatedLogManager replicatedLogManager,
     IStateManager stateManager,
     CheckpointManager checkpointManager,
     Func <LogicalSequenceNumber, Task> waitForLogFlushUptoLsn,
     long replicaId,
     LogicalSequenceNumber uptoLsn,
     IOperationDataStream copyContext,
     ITracer tracer)
 {
     this.stateManager           = stateManager;
     this.checkpointManager      = checkpointManager;
     this.replicatedLogManager   = replicatedLogManager;
     this.replicaId              = replicaId;
     this.waitForLogFlushUptoLsn = waitForLogFlushUptoLsn;
     this.uptoLsn               = uptoLsn;
     this.copyContext           = copyContext;
     this.targetReplicaId       = 0;
     this.targetProgressVector  = null;
     this.targetLogHeadEpoch    = LogicalSequenceNumber.InvalidEpoch;
     this.targetLogHeadLsn      = LogicalSequenceNumber.InvalidLsn;
     this.currentTargetLsn      = LogicalSequenceNumber.InvalidLsn;
     this.copyStage             = CopyStage.CopyMetadata;
     this.copyStateStream       = null;
     this.copiedRecordNumber    = 0;
     this.sourceStartingLsn     = LogicalSequenceNumber.InvalidLsn;
     this.targetStartingLsn     = LogicalSequenceNumber.InvalidLsn;
     this.logRecordsToCopy      = null;
     this.beginCheckpointRecord = null;
     this.bw         = new BinaryWriter(new MemoryStream());
     this.isDisposed = false;
     this.tracer     = tracer;
 }
Code example #9
 internal CompleteCheckpointLogRecord(
     LogicalSequenceNumber lsn,
     IndexingLogRecord logHeadRecord,
     PhysicalLogRecord lastLinkedPhysicalRecord)
     : base(LogRecordType.CompleteCheckpoint, logHeadRecord, lsn, lastLinkedPhysicalRecord)
 {
 }
Code example #10
File: LogRecord.cs Project: zmyer/service-fabric
        private static LogRecord ReadFromOperationData(OperationData operationData)
        {
            LogRecord     record;
            long          lsn;
            const ulong   RecordPosition = InvalidRecordPosition;
            LogRecordType recordType;
            var           index = -1;

            using (var reader = new BinaryReader(IncrementIndexAndGetMemoryStreamAt(operationData, ref index)))
            {
                // Logical metadata section.
                var startingPosition = reader.BaseStream.Position;
                var sizeOfSection    = reader.ReadInt32();
                var endPosition      = startingPosition + sizeOfSection;

                // Logical metadata read.
                recordType = (LogRecordType)reader.ReadUInt32();
                lsn        = reader.ReadInt64();

                // Jump to the end of the section ignoring fields that are not understood.
                Utility.Assert(endPosition >= reader.BaseStream.Position, "Could not have read more than section size.");
                reader.BaseStream.Position = endPosition;
            }

            switch (recordType)
            {
            case LogRecordType.BeginTransaction:
                record = new BeginTransactionOperationLogRecord(recordType, RecordPosition, lsn);
                break;

            case LogRecordType.Operation:
                record = new OperationLogRecord(recordType, RecordPosition, lsn);
                break;

            case LogRecordType.EndTransaction:
                record = new EndTransactionLogRecord(recordType, RecordPosition, lsn);
                break;

            case LogRecordType.Barrier:
                record = new BarrierLogRecord(recordType, RecordPosition, lsn);
                break;

            case LogRecordType.UpdateEpoch:
                record = new UpdateEpochLogRecord(recordType, RecordPosition, lsn);
                break;

            case LogRecordType.Backup:
                record = new BackupLogRecord(recordType, RecordPosition, lsn);
                break;

            default:
                Utility.CodingError(
                    "Unexpected record type received during replication/copy processing {0}",
                    recordType);
                return(null);
            }

            record.ReadLogical(operationData, ref index);
            return(record);
        }
Code example #11
 public void Reuse()
 {
     InitializeCopyAndReplicationDrainTcs();
     this.copiedUptoLsn          = LogicalSequenceNumber.MaxLsn;
     this.ReadConsistentAfterLsn = LogicalSequenceNumber.MaxLsn;
     this.replicationStreamDrainCompletionTcs.TrySetResult(null);
     this.copyStreamDrainCompletionTcs.TrySetResult(null);
 }
Code example #12
 public void TruncateTail(LogicalSequenceNumber tailLsn)
 {
     lock (lsnOrderingLock)
     {
         var record = new TruncateTailLogRecord(tailLsn, this.LastLinkedPhysicalRecord);
         this.LogManager.PhysicalLogWriter.InsertBufferedRecord(record);
     }
 }
Code example #13
File: LogHeadRecord.cs Project: zmyer/service-fabric
 internal LogHeadRecord(LogRecordType recordType, ulong recordPosition, long lsn)
     : base(recordType, recordPosition, lsn)
 {
     this.logHeadEpoch        = LogicalSequenceNumber.InvalidEpoch;
     this.logHeadLsn          = LogicalSequenceNumber.InvalidLsn;
     this.logHeadPsn          = PhysicalSequenceNumber.InvalidPsn;
     this.logHeadRecordOffset = InvalidPhysicalRecordOffset;
     this.logHeadRecord       = IndexingLogRecord.InvalidIndexingLogRecord;
 }
Code example #14
 internal InformationLogRecord(
     LogicalSequenceNumber lsn,
     PhysicalLogRecord linkedPhysicalRecord,
     InformationEvent informationEvent)
     : base(LogRecordType.Information, lsn, linkedPhysicalRecord)
 {
     this.Initialize(informationEvent);
     this.UpdateApproximateDiskSize();
 }
Code example #15
 internal IndexingLogRecord(
     Epoch currentEpoch,
     LogicalSequenceNumber lsn,
     PhysicalLogRecord linkedPhysicalRecord)
     : base(LogRecordType.Indexing, lsn, linkedPhysicalRecord)
 {
     this.currentEpoch = currentEpoch;
     this.UpdateApproximateDiskSize();
 }
Code example #16
        private async Task TruncateTailAsync(LogicalSequenceNumber tailLsn)
        {
            FabricEvents.Events.TruncateTail(this.tracer.Type, tailLsn.LSN);

            Utility.Assert(
                (this.roleContextDrainState.ReplicaRole == ReplicaRole.IdleSecondary) || (this.roleContextDrainState.ReplicaRole == ReplicaRole.ActiveSecondary),
                "(this.Role == ReplicaRole.IdleSecondary) || (this.Role == ReplicaRole.ActiveSecondary)");

            Utility.Assert(
                this.checkpointManager.LastStableLsn <= tailLsn,
                "this.lastStableLsn <= tailLsn. last stable lsn :{0}",
                this.checkpointManager.LastStableLsn.LSN);

            ApplyContext falseProgressApplyContext = ApplyContext.SecondaryFalseProgress;

            var truncateTailMgr = new TruncateTailManager(
                this.replicatedLogManager,
                this.transactionManager.TransactionsMap,
                this.stateManager,
                this.backupManager,
                tailLsn,
                falseProgressApplyContext,
                this.recoveryManager.RecoveryLogsReader,
                this.tracer);

            var currentRecord = await truncateTailMgr.TruncateTailAsync();

            Utility.Assert(
                currentRecord.Lsn == tailLsn,
                "TruncateTail: V1 replicator ensures that lsns are continuous. currentLsn {0} == tailLsn {1}",
                currentRecord.Lsn, tailLsn);

            this.checkpointManager.ResetStableLsn(tailLsn);
            this.recoveryManager.OnTailTruncation(tailLsn);

            // 6450429: Replicator maintains three values for supporting consistent reads and snapshot.
            // These values have to be updated as part of the false progress correction.
            // ReadConsistentAfterLsn is used to ensure that all state providers have applied the highest recovery or copy log operation
            // that they might have seen in their current checkpoint. Reading at a tailLsn below this causes inconsistent reads across state providers.
            // Since this was a FALSE PROGRESS copy, replica is still using the checkpoints it has recovered.
            // We have undone any operation that could have been false progressed in these checkpoints (Fuzzy checkpoint only) and ensured all state providers are applied to the same barrier.
            // Hence readConsistentAfterLsn is set to the new tail of the log.
            this.ReadConsistentAfterLsn = tailLsn;

            // lastAppliedBarrierRecord is used to get the current visibility sequence number.
            // Technically, currentRecord (the new tail record) may not be a barrier; however, it carries the guarantee of a barrier:
            //      All records before the tail, including the tail itself, must have been applied.
            // Hence we set the lastAppliedBarrierRecord to the new tail record.
            // Note: No property other than .Lsn can be used.
            this.recordsProcessor.TruncateTail(currentRecord);

            // Last value that is kept in the replicator (Version Manager) is the lastDispatchingBarrier.
            // This is used for read your writes support.
            // In this case it is not necessary to modify it: this replica either has not made any new progress (its own writes), or it gets elected primary and,
            // before it can do anything else, dispatches a new barrier which will become the lastDispatchingBarrier.
        }
Code example #17
 internal BarrierLogRecord(LogicalSequenceNumber lastStableLsn)
     : base(LogRecordType.Barrier)
 {
     Utility.Assert(
         lastStableLsn != LogicalSequenceNumber.InvalidLsn,
         "lastStableLsn is expected to be invalid but the last stable lsn has value : {0}",
         lastStableLsn.LSN);
     this.lastStableLsn = lastStableLsn;
     this.UpdateApproximateDiskSize();
 }
Code example #18
File: KtlLogManager.cs Project: zmyer/service-fabric
        internal override async Task <IndexingLogRecord> CreateCopyLogAsync(
            Epoch startingEpoch,
            LogicalSequenceNumber startingLsn)
        {
            var flushCallback = this.PhysicalLogWriter.CallbackManager.Callback;

            await this.CloseCurrentLogAsync().ConfigureAwait(false);

            this.CurrentLogFileAlias = this.BaseLogFileAlias + CopySuffix;
            try
            {
                var aliasGuid =
                    await this.physicalLog.ResolveAliasAsync(this.CurrentLogFileAlias, CancellationToken.None).ConfigureAwait(false);

                FabricEvents.Events.LogManager(
                    this.Tracer.Type,
                    "CreateCopyLog: Attempt to delete logical log " + this.CurrentLogFileAlias + " guid: " + aliasGuid);

                await this.physicalLog.DeleteLogicalLogAsync(aliasGuid, CancellationToken.None).ConfigureAwait(false);
            }
            catch (Exception ex)
            {
                FabricEvents.Events.LogManager(
                    this.Tracer.Type,
                    "CreateCopyLog: Delete logical log: " + this.CurrentLogFileAlias + " failed: " + ex);
            }

            this.LogicalLog = await this.CreateLogFileAsync(true, CancellationToken.None).ConfigureAwait(false);

            var callbackManager = new PhysicalLogWriterCallbackManager(
                flushCallback,
                this.Tracer);

            this.PhysicalLogWriter = new PhysicalLogWriter(
                this.LogicalLog,
                callbackManager,
                this.Tracer,
                this.MaxWriteCacheSizeInMB,
                this.IncomingBytesRateCounterWriter,
                this.LogFlushBytesRateCounterWriter,
                this.BytesPerFlushCounterWriter,
                this.AvgFlushLatencyCounterWriter,
                this.AvgSerializationLatencyCounterWriter,
                false);

            var firstIndexingRecord = new IndexingLogRecord(startingEpoch, startingLsn, null);

            this.PhysicalLogWriter.InsertBufferedRecord(firstIndexingRecord);

            await this.PhysicalLogWriter.FlushAsync("CreateCopyLogAsync").ConfigureAwait(false);

            this.LogHeadRecordPosition = firstIndexingRecord.RecordPosition;
            return(firstIndexingRecord);
        }
Code example #19
        internal static void Assert(bool condition, string format, LogicalSequenceNumber param1, LogRecordType param2)
        {
            if (condition == false)
            {
                var failFastMessage = string.Format(System.Globalization.CultureInfo.InvariantCulture, format, param1, param2);
                FailFast(failFastMessage);

                // AMW - Force break into debugger for ease of debugging
                Debugger.Break();
            }
        }
Code example #20
        internal void Read(BinaryReader br, bool isPhysicalRead)
        {
            var dataLossNumber      = br.ReadInt64();
            var configurationNumber = br.ReadInt64();

            this.epoch            = new Epoch(dataLossNumber, configurationNumber);
            this.lsn              = new LogicalSequenceNumber(br.ReadInt64());
            this.primaryReplicaId = br.ReadInt64();
            this.timestamp        = DateTime.FromBinary(br.ReadInt64());

            return;
        }
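The Read method above implies the serialized layout of an entry: two Int64s for the epoch, then the LSN, the primary replica id, and the timestamp, each as an Int64. A symmetric writer is not shown in this excerpt; a sketch of one, assuming Epoch exposes DataLossNumber and ConfigurationNumber properties, might look like this:

        // Sketch only: mirrors ProgressVectorEntry.Read above.
        // Epoch.DataLossNumber / Epoch.ConfigurationNumber are assumed property names.
        internal void Write(BinaryWriter bw)
        {
            bw.Write(this.epoch.DataLossNumber);
            bw.Write(this.epoch.ConfigurationNumber);
            bw.Write(this.lsn.LSN);
            bw.Write(this.primaryReplicaId);
            bw.Write(this.timestamp.ToBinary());
        }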
Code example #21
 internal TruncateHeadLogRecord(
     IndexingLogRecord logHeadRecord,
     LogicalSequenceNumber lsn,
     PhysicalLogRecord lastLinkedPhysicalRecord,
     bool isStable,
     long periodicTruncationTimeTicks)
     : base(LogRecordType.TruncateHead, logHeadRecord, lsn, lastLinkedPhysicalRecord)
 {
     this.truncationState             = TruncationState.Invalid;
     this.isStable                    = isStable;
     this.periodicTruncationTimeTicks = periodicTruncationTimeTicks;
     this.UpdateApproximateDiskSize();
 }
Code example #22
 public static CopyModeResult CreatePartialCopyResult(
     SharedProgressVectorEntry sharedProgressVectorEntry,
     LogicalSequenceNumber sourceStartingLsn,
     LogicalSequenceNumber targetStartingLsn)
 {
     return(new CopyModeResult
     {
         SourceStartingLsn = sourceStartingLsn,
         TargetStartingLsn = targetStartingLsn,
         CopyMode = CopyMode.Partial,
         SharedProgressVectorEntry = sharedProgressVectorEntry
     });
 }
Code example #23
        public async Task BlockSecondaryPumpIfNeeded(LogicalSequenceNumber lastStableLsn)
        {
            // If no checkpoints have happened on the secondary, it implies the copy is in progress and we cannot block as
            // we need to pump more operations to be able to issue the first checkpoint
            if (this.logManager.LastCompletedBeginCheckpointRecord == null)
            {
                return;
            }

            var       bytesUsedFromCurrentLogHead = this.GetBytesUsed(this.logManager.CurrentLogHeadRecord);
            LogRecord pendingOperationRecord      = this.logManager.LastInProgressCheckpointRecord;

            if (pendingOperationRecord == null)
            {
                pendingOperationRecord = this.logManager.LastInProgressTruncateHeadRecord;
            }

            if (pendingOperationRecord == null)
            {
                return;
            }

            // If there is pending checkpoint/truncation and the current barrier stable LSN is greater than the checkpoint/truncatehead LSN,
            // it implies that the checkpoint/truncation is in a ready state and it can be applied
            // The fact that it is still pending implies the actual perform checkpoint operation/truncation operation is taking a long time
            // and we hence block here
            // If the stable lsn is smaller, we need to accept more so that more group commits lead to more progress in stable lsn
            // V1 Repl will take care of throttling if we are not reaching stable (Queue full errors)
            if (pendingOperationRecord.Lsn > lastStableLsn)
            {
                return;
            }

            if (bytesUsedFromCurrentLogHead > this.throttleAtLogUsageBytes)
            {
                var logUsage = this.logManager.LogManager.Length;

                FabricEvents.Events.DrainReplicationBlocked(
                    this.tracer.Type,
                    pendingOperationRecord.RecordType.ToString(),
                    pendingOperationRecord.Lsn.LSN,
                    bytesUsedFromCurrentLogHead,
                    logUsage);

                await pendingOperationRecord.AwaitProcessing().ConfigureAwait(false);

                logUsage = this.logManager.LogManager.Length;

                FabricEvents.Events.DrainReplicationContinue(this.tracer.Type, bytesUsedFromCurrentLogHead, logUsage);
            }
        }
Code example #24
        private void ReadPrivate(BinaryReader br, bool isPhysicalRead)
        {
            var startingPosition = br.BaseStream.Position;
            var sizeOfSection    = br.ReadInt32();
            var endPosition      = startingPosition + sizeOfSection;

            this.lastStableLsn = new LogicalSequenceNumber(br.ReadInt64());

            // Jump to the end of the section ignoring fields that are not understood.
            Utility.Assert(endPosition >= br.BaseStream.Position, "Could not have read more than section size.");
            br.BaseStream.Position = endPosition;

            this.UpdateApproximateDiskSize();
        }
Code example #25
File: LogHeadRecord.cs Project: zmyer/service-fabric
        internal LogHeadRecord(
            LogRecordType recordType,
            IndexingLogRecord logHeadRecord,
            LogicalSequenceNumber lsn,
            PhysicalLogRecord lastLinkedPhysicalRecord)
            : base(recordType, lsn, lastLinkedPhysicalRecord)
        {
            this.logHeadEpoch        = logHeadRecord.CurrentEpoch;
            this.logHeadLsn          = logHeadRecord.Lsn;
            this.logHeadPsn          = logHeadRecord.Psn;
            this.logHeadRecordOffset = InvalidPhysicalRecordOffset;
            this.logHeadRecord       = logHeadRecord;

            this.UpdateApproximateDiskSize();
        }
Code example #26
        public async Task <bool> MoveNextAsync(CancellationToken cancellationToken)
        {
            while (true)
            {
                if (await this.source.MoveNextAsync(cancellationToken).ConfigureAwait(false) == false)
                {
                    return(false);
                }

                var logicalRecord = this.source.Current as LogicalLogRecord;
                if (logicalRecord == null)
                {
                    // In incremental backups, we do not have to copy physical log records.
                    continue;
                }

                // If the logical log record is already backed up skip it.
                if (this.source.Current.Lsn < this.lastBackupLogRecord.HighestBackedUpLsn)
                {
                    continue;
                }

                // UpdateEpoch is a logical log record that does not have a unique LSN.
                var updateEpochLogRecord = logicalRecord as UpdateEpochLogRecord;
                if (updateEpochLogRecord != null)
                {
                    // If the previous backup included this epoch, we can ignore it.
                    // If the previous backup included a higher epoch, it must have included this one or this one is a straggler.
                    if (updateEpochLogRecord.Epoch.CompareTo(this.lastBackupLogRecord.HighestBackedUpEpoch) <= 0)
                    {
                        continue;
                    }

                    this.lastEpoch = updateEpochLogRecord.Epoch;
                }

                this.count++;
                this.lastLsn = logicalRecord.Lsn;

                if (this.startingLsn == LogicalSequenceNumber.InvalidLsn)
                {
                    this.startingLsn   = logicalRecord.Lsn;
                    this.startingEpoch = this.loggingReplicator.GetEpoch(logicalRecord.Lsn);
                }

                return(true);
            }
        }
Code example #27
File: LogRecord.cs Project: zmyer/service-fabric
        protected LogRecord(LogRecordType recordType)
        {
            this.recordType = recordType;
            this.lsn        = LogicalSequenceNumber.InvalidLsn;
            this.psn        = PhysicalSequenceNumber.InvalidPsn;
            this.previousPhysicalRecordOffset = InvalidPhysicalRecordOffset;

            this.recordLength           = InvalidRecordLength;
            this.recordPosition         = InvalidRecordPosition;
            this.previousPhysicalRecord = PhysicalLogRecord.InvalidPhysicalLogRecord;
            this.flushedTask            = new CompletionTask();
            this.appliedTask            = new CompletionTask();
            this.processedTask          = new CompletionTask();

            this.UpdateApproximateDiskSize();
        }
Code example #28
File: LogRecord.cs Project: zmyer/service-fabric
        protected LogRecord(LogRecordType recordType, ulong recordPosition, long lsn)
        {
            this.recordType = recordType;
            this.lsn        = new LogicalSequenceNumber(lsn);
            this.psn        = PhysicalSequenceNumber.InvalidPsn;
            this.previousPhysicalRecordOffset = InvalidPhysicalRecordOffset;

            this.recordPosition         = recordPosition;
            this.recordLength           = InvalidRecordLength;
            this.previousPhysicalRecord = PhysicalLogRecord.InvalidPhysicalLogRecord;
            this.flushedTask            = new CompletionTask();
            this.appliedTask            = new CompletionTask();
            this.processedTask          = new CompletionTask();

            this.ApproximateSizeOnDisk = 0;
        }
Code example #29
File: LogRecord.cs Project: zmyer/service-fabric
        protected LogRecord()
        {
            this.recordType = LogRecordType.Invalid;
            this.lsn        = LogicalSequenceNumber.InvalidLsn;
            this.psn        = PhysicalSequenceNumber.InvalidPsn;
            this.previousPhysicalRecordOffset = InvalidPhysicalRecordOffset;

            this.recordPosition         = InvalidRecordPosition;
            this.recordLength           = InvalidRecordLength;
            this.previousPhysicalRecord = null;
            this.flushedTask            = this.invalidCompletionTask;
            this.appliedTask            = this.invalidCompletionTask;
            this.processedTask          = this.invalidCompletionTask;

            this.ApproximateSizeOnDisk = 0;
        }
Code example #30
        internal EndCheckpointLogRecord(
            BeginCheckpointLogRecord lastCompletedBeginCheckpointRecord,
            IndexingLogRecord logHeadRecord,
            LogicalSequenceNumber lsn,
            PhysicalLogRecord linkedPhysicalRecord)
            : base(LogRecordType.EndCheckpoint, logHeadRecord, lsn, linkedPhysicalRecord)
        {
            this.lastCompletedBeginCheckpointRecordOffset = InvalidPhysicalRecordOffset;
            this.lastStableLsn = lastCompletedBeginCheckpointRecord.LastStableLsn;
            this.lastCompletedBeginCheckpointRecord = lastCompletedBeginCheckpointRecord;
            Utility.Assert(
                this.lastStableLsn >= this.lastCompletedBeginCheckpointRecord.Lsn,
                "this.lastStableLsn >= this.lastCompletedBeginCheckpointRecord.LastLogicalSequenceNumber. Last stable lsn is : {0} and last completed checkpoint record is {1}",
                this.lastStableLsn.LSN, this.lastCompletedBeginCheckpointRecord.Lsn.LSN);

            this.UpdateApproximateDiskSize();
        }