Example No. 1
        public static async Task OpenAsync(MetadataTable metadataTable, Stream stream, string traceType)
        {
            // assert count is zero.
            Diagnostics.Assert(metadataTable.Table.Count == 0, traceType, "Count should be zero");

            await PopulateMetadataAsync(metadataTable, stream, traceType).ConfigureAwait(false);
        }
Example No. 2
        /// <summary>
        /// Gets the user value in the versioned item.
        /// </summary>
        /// <param name="metadataTable">The metadata table</param>
        /// <param name="valueSerializer">The value serializer.</param>
        /// <param name="isValueAReferenceType">Is reference type</param>
        /// <param name="readMode">The read mode.</param>
        /// <param name="valueCounter">Loaded value counter.</param>
        /// <param name="cancellationToken">Token used to signal cancellation.</param>
        /// <param name="traceType">trace Id</param>
        /// <param name="duringRecovery">Called during recovery.</param>
        /// <returns>The value.</returns>
        /// <remarks>
        /// Reference Type Life Cycle
        /// At Recovery:    [InUse = false, CanBeSweepedToDisk = true, Value = null]
        /// Read:           [InUse = readMode == CacheResult] [Value = CacheResult?] [CanBeSweepedToDisk = value != null]
        ///
        /// Value Type Life Cycle
        /// At Recovery:    [InUse = false, CanBeSweepedToDisk = true, Value = default(TValue)]
        /// First Read:     [Value = CacheResult?] [InUse = true] [CanBeSweepedToDisk = false]
        /// </remarks>
        public virtual async Task <TValue> GetValueAsync(
            MetadataTable metadataTable,
            IStateSerializer <TValue> valueSerializer,
            bool isValueAReferenceType,
            ReadMode readMode,
            LoadValueCounter valueCounter,
            CancellationToken cancellationToken,
            string traceType,
            bool duringRecovery = false)
        {
            TValue value = this.Value;

            if (this.ShouldValueBeLoadedFromDisk(isValueAReferenceType, value, traceType) == false || readMode == ReadMode.Off)
            {
                return(value);
            }

            Diagnostics.Assert(duringRecovery == true || readMode != ReadMode.CacheResult || isValueAReferenceType == false || this.InUse == true, traceType, "If CacheResult, InUse must have been set.");
            value = await MetadataManager.ReadValueAsync <TValue>(metadataTable.Table, this, valueSerializer, cancellationToken, traceType).ConfigureAwait(false);

            // Value must be set before updating the flags.
            // Example problem: after recovery, two reads race on the same key:
            // T1 sees InUse (IsLoaded) == false, reads the value, updates InUse, then gets context-switched out;
            // T2 sees InUse (IsLoaded) == true and reads this.Value, which has not yet been set by T1.
            if (this.ShouldCacheValue(readMode, isValueAReferenceType))
            {
                this.Value = value;
                valueCounter.IncrementCounter();
            }

            // Flags must always be updated. For value type, inUse (IsLoadedFromDiskAfterRecovery) must be set.
            this.UpdateFlagsFollowingLoadValue(isValueAReferenceType, value, readMode, traceType);

            return(value);
        }
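
The remarks above describe the caching life cycle; the following is a minimal sketch of how a caller might drive GetValueAsync under two read modes. The variable names (item, metadataTable, valueSerializer, loadValueCounter, traceType) are assumptions standing in for state owned by the surrounding store.

        // Hypothetical call site (sketch): "item" stands for a TVersionedItem<TValue>
        // recovered from a checkpoint; the remaining arguments come from the owning store.

        // ReadMode.Off returns whatever is already in memory without touching disk.
        TValue uncachedValue = await item.GetValueAsync(
            metadataTable,
            valueSerializer,
            true /* isValueAReferenceType */,
            ReadMode.Off,
            loadValueCounter,
            CancellationToken.None,
            traceType).ConfigureAwait(false);

        // ReadMode.CacheResult loads the value from disk, caches it on the item, and
        // increments the load value counter so sweep can account for the cached memory.
        TValue cachedValue = await item.GetValueAsync(
            metadataTable,
            valueSerializer,
            true /* isValueAReferenceType */,
            ReadMode.CacheResult,
            loadValueCounter,
            CancellationToken.None,
            traceType).ConfigureAwait(false);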
Example No. 3
        /// <summary>
        /// Populates the in-memory metadata table from the given checkpoint metadata stream.
        /// </summary>
        /// <remarks>Exposed for testability.</remarks>
        internal static async Task PopulateMetadataAsync(MetadataTable metadataTable, Stream stream, string traceType)
        {
            // Read and validate the Footer section.  The footer is always at the end of the stream, minus space for the checksum.
            var footerHandle = new BlockHandle(stream.Length - FileFooter.SerializedSize - sizeof(ulong), FileFooter.SerializedSize);
            var footer       = await FileBlock.ReadBlockAsync(stream, footerHandle, (sectionReader, sectionHandle) => FileFooter.Read(sectionReader, sectionHandle)).ConfigureAwait(false);

            // Verify we know how to deserialize this version of the file.
            if (footer.Version != FileVersion)
            {
                throw new InvalidDataException(SR.Error_MetadataManager_Deserialized);
            }

            // Read and validate the Properties section.
            var propertiesHandle = footer.PropertiesHandle;
            var properties       =
                await
                FileBlock.ReadBlockAsync(
                    stream,
                    propertiesHandle,
                    (sectionReader, sectionHandle) => FilePropertySection.Read <MetadataManagerFileProperties>(sectionReader, sectionHandle)).ConfigureAwait(false);

            // Read disk metadata into memory.
            await ReadDiskMetadataAsync(metadataTable.Table, stream, properties, traceType).ConfigureAwait(false);

            metadataTable.CheckpointLSN = properties.CheckpointLSN;
        }
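
The block layout implied by this reader (and by WriteAsync in Example No. 9) can be sketched as follows; it is a reading of the handles computed above, not an authoritative file-format specification.

        // Assumed on-disk layout (sizes not to scale):
        //   [ disk metadata blocks ... ][ properties block ][ footer ][ trailing checksum (ulong) ]
        // The footer handle is computed back from the end of the stream, and the footer in turn
        // points at the properties block via footer.PropertiesHandle.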
Example No. 4
        private void Dispose(bool disposing)
        {
            if (!this.disposed)
            {
                if (disposing)
                {
                    // Trace that the copy operation is being disposed, if it was started.
                    FabricEvents.Events.CopyAsync(this.traceType, "disposing");

                    if (this.currentFileStream != null)
                    {
                        this.currentFileStream.Dispose();
                        this.currentFileStream = null;
                    }

                    if (this.copySnapshotOfMetadataTableEnumerator != null)
                    {
                        this.copySnapshotOfMetadataTableEnumerator.Dispose();
                        this.copySnapshotOfMetadataTableEnumerator = null;
                    }

                    if (this.copySnapshotOfMetadataTable != null)
                    {
                        this.copySnapshotOfMetadataTable.ReleaseRef();
                        this.copySnapshotOfMetadataTable = null;
                    }
                }

                this.copyStage = CopyStage.None;
            }

            this.disposed = true;
        }
Example No. 5
        public bool ShouldMerge(MetadataTable mergeTable, ConsolidationMode mode, out List <uint> mergeList)
        {
            if (mode == ConsolidationMode.GDPR && ShouldPerformGDPRMerge(mergeTable, out mergeList))
            {
#if !DotNetCoreClr
                FabricEvents.Events.MergePolicy(this.traceType, MergePolicy.GDPR.ToString());
#endif
                return(true);
            }

            if (this.IsMergePolicyEnabled(MergePolicy.SizeOnDisk))
            {
                bool shouldMerge = this.ShouldMergeForSizeOnDiskPolicy(mergeTable);

                if (shouldMerge)
                {
                    mergeList = new List <uint>(mergeTable.Table.Keys);
#if !DotNetCoreClr
                    FabricEvents.Events.MergePolicy(this.traceType, MergePolicy.SizeOnDisk.ToString());
#endif
                    return(true);
                }
            }

            // If there are not enough files on disk to merge, skip the remaining merge policies.
            if (mergeTable.Table.Count < this.MergeFilesCountThreshold)
            {
                mergeList = null;
                return(false);
            }

            if (this.IsMergePolicyEnabled(MergePolicy.FileCount))
            {
                ConditionalValue <List <uint> > fileCountMergeConditionalValue = this.ShouldMergeDueToFileCountPolicy(mergeTable);
                if (fileCountMergeConditionalValue.HasValue)
                {
                    mergeList = fileCountMergeConditionalValue.Value;
#if !DotNetCoreClr
                    FabricEvents.Events.MergePolicy(this.traceType, MergePolicy.FileCount.ToString());
#endif
                    return(true);
                }
            }

            if (this.IsMergePolicyEnabled(MergePolicy.InvalidEntries) || this.IsMergePolicyEnabled(MergePolicy.DeletedEntries))
            {
                mergeList = this.GetMergeList(mergeTable);
                if (mergeList.Count >= this.MergeFilesCountThreshold)
                {
#if !DotNetCoreClr
                    FabricEvents.Events.MergePolicy(this.traceType, "MergePolicy.Invalid/DeletedEntries");
#endif
                    return(true);
                }
            }

            mergeList = null;
            return(false);
        }
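
A sketch of how a consolidation path might consume ShouldMerge. The mergeHelper and currentMetadataTable names are assumptions; ConsolidationMode.GDPR is the only mode named in the example above.

        // Hypothetical caller (sketch): "mergeHelper" is an instance of the class defining
        // ShouldMerge and "currentMetadataTable" is the checkpoint metadata table being consolidated.
        List<uint> fileIdsToMerge;
        if (mergeHelper.ShouldMerge(currentMetadataTable, ConsolidationMode.GDPR, out fileIdsToMerge))
        {
            // fileIdsToMerge holds the checkpoint file ids selected by whichever merge
            // policy fired (GDPR, SizeOnDisk, FileCount, or Invalid/DeletedEntries).
            // The consolidation manager would now merge those files into a new checkpoint.
        }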
Example No. 6
        internal static async Task PopulateMetadataAsync(MetadataTable metadataTable, string path, string traceType)
        {
            // MCoskun: Unit-test-only API, hence it uses the default IO priority.
            using (var filestream = FabricFile.Open(path, FileMode.Open, FileAccess.Read, FileShare.Read, 4096, FileOptions.Asynchronous))
            {
                await PopulateMetadataAsync(metadataTable, filestream, traceType).ConfigureAwait(false);

                metadataTable.MetadataFileSize = filestream.Length;
            }
        }
Example No. 7
        public static async Task WriteAsync(MetadataTable metadataTable, string path)
        {
            // MCoskun: Default IoPriorityHint is used.
            // Reason: Used during Perform checkpoint to write the new metadata file. Perform checkpoint should complete as fast as possible.
            using (var filestream = FabricFile.Open(path, FileMode.Create, FileAccess.Write, FileShare.Write, 4096, FileOptions.Asynchronous))
            {
                await WriteAsync(metadataTable, filestream).ConfigureAwait(false);

                metadataTable.MetadataFileSize = filestream.Length;
            }
        }
Example No. 8
        public static async Task OpenAsync(MetadataTable metadataTable, string path, string traceType)
        {
            // assert count is zero.
            Diagnostics.Assert(metadataTable.Table.Count == 0, traceType, "Count should be zero");

            // MCoskun: Default IoPriorityHint is used.
            // Reason: Used during recovery, restore, and backup. These operations either may be required to reach write quorum or are customer-requested operations.
            using (var filestream = FabricFile.Open(path, FileMode.Open, FileAccess.Read, FileShare.Read, 4096, FileOptions.Asynchronous))
            {
                await PopulateMetadataAsync(metadataTable, filestream, traceType).ConfigureAwait(false);

                metadataTable.MetadataFileSize = filestream.Length;
            }
        }
Example No. 9
        public static async Task WriteAsync(MetadataTable metadataTable, Stream stream)
        {
            // Write all metadata to disk.
            var properties = await WriteDiskMetadataAsync(metadataTable.Table, stream).ConfigureAwait(false);

            properties.CheckpointLSN = metadataTable.CheckpointLSN;

            // Write the Properties.
            var propertiesHandle = await FileBlock.WriteBlockAsync(stream, (sectionWriter) => properties.Write(sectionWriter)).ConfigureAwait(false);

            // Write the Footer.
            var footer = new FileFooter(propertiesHandle, FileVersion);
            await FileBlock.WriteBlockAsync(stream, (sectionWriter) => footer.Write(sectionWriter)).ConfigureAwait(false);

            // Finally, flush to disk.
            await stream.FlushAsync().ConfigureAwait(false);
        }
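
Putting the path-based overloads from Examples No. 7 and No. 8 together, a write/read round-trip might look like the sketch below. The file path and trace string are placeholders, and constructing MetadataTable directly as well as OpenAsync living on MetadataManager are assumptions.

        // Sketch of a checkpoint metadata round-trip using the path-based overloads.
        var table = new MetadataTable();                // assumed to be constructible empty
        // ... populate table.Table and set table.CheckpointLSN ...

        await MetadataManager.WriteAsync(table, @"C:\store\metadata.tmp");            // placeholder path

        var recovered = new MetadataTable();
        await MetadataManager.OpenAsync(recovered, @"C:\store\metadata.tmp", "traceType");
        // recovered.Table now mirrors the persisted entries and
        // recovered.CheckpointLSN matches what was written.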
Example No. 10
        internal bool ShouldMergeForSizeOnDiskPolicy(MetadataTable mergeTable)
        {
            long totalSize             = 0;
            long totalRecoverableBytes = 0;

            if (mergeTable == null || mergeTable.Table == null)
            {
                return(false);
            }
            foreach (var fileMetadata in mergeTable.Table.Values)
            {
                totalSize += fileMetadata.GetFileSize();

                totalRecoverableBytes += GetInvalidDeleteEntrySizeInBytes(fileMetadata);
            }

            var percentOfDeletedInvalidBytes = (int)(totalRecoverableBytes * 100.0f / totalSize);

            return(percentOfDeletedInvalidBytes >= this.MaxPercentOfDeletedInvalidBytes);
        }
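
A worked illustration of the size-on-disk check; the byte counts and threshold below are made up for the example.

        // Illustrative numbers only: suppose the checkpoint files total 200 MB on disk
        // and 90 MB of that consists of invalid or deleted entries.
        //   percentOfDeletedInvalidBytes = (int)(90 * 100.0f / 200) = 45
        // If MaxPercentOfDeletedInvalidBytes were, say, 40, the policy would return true
        // and the caller would merge every file in the table.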
Example No. 11
        /// <summary>
        /// Decides whether files should be merged, and which ones, according to the File Count Merge Policy.
        /// </summary>
        /// <param name="mergeTable">Current metadata table.</param>
        /// <returns>Conditional value whose value is the list of files to be merged.</returns>
        internal ConditionalValue <List <uint> > ShouldMergeDueToFileCountPolicy(MetadataTable mergeTable)
        {
            // If total number of checkpoint files is below the merge threshold, merge is not required.
            if (mergeTable.Table.Count < this.fileCountMergeConfiguration.FileCountMergeThreshold)
            {
                return(DoNotMergeConditionalValue);
            }

            // The following code assumes that there is at most one ShouldMerge call at a time.
            this.AssertIfMapIsNotClean();

            foreach (var item in mergeTable.Table)
            {
                uint         fileId       = item.Key;
                FileMetadata fileMetadata = item.Value;
                long         fileSize     = fileMetadata.GetFileSize();

                ushort fileType   = this.fileCountMergeConfiguration.GetFileType(fileSize);
                bool   listExists = this.fileTypeToMergeList.ContainsKey(fileType);
                if (listExists == false)
                {
                    this.fileTypeToMergeList.Add(fileType, new List <uint>());
                }

                this.fileTypeToMergeList[fileType].Add(fileId);
                if (this.fileTypeToMergeList[fileType].Count == this.fileCountMergeConfiguration.FileCountMergeThreshold)
                {
                    var  mergeList = this.fileTypeToMergeList[fileType];
                    bool isRemoved = this.fileTypeToMergeList.Remove(fileType);
                    Diagnostics.Assert(isRemoved, this.traceType, "Remove must be successful.");
                    this.CleanMap();
                    return(new ConditionalValue <List <uint> >(true, mergeList));
                }
            }

            this.CleanMap();
            return(DoNotMergeConditionalValue);
        }
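
To make the file-count policy concrete, here is an assumed walkthrough of the bucketing above; the threshold value and size classes are invented for illustration.

        // Illustrative walkthrough, assuming FileCountMergeThreshold == 3:
        //   file 1 -> size class A -> bucket A = { 1 }
        //   file 2 -> size class B -> bucket B = { 2 }
        //   file 3 -> size class A -> bucket A = { 1, 3 }
        //   file 4 -> size class A -> bucket A = { 1, 3, 4 }   // reaches the threshold
        // => returns ConditionalValue<List<uint>>(true, [1, 3, 4]) and clears the per-call buckets.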
Example No. 12
        /// <summary>
        /// Determine which file ids should be merged.
        /// </summary>
        /// <param name="mergeTable">Table of file metadata tables</param>
        /// <returns>List of fileIds to be merged</returns>
        public List <uint> GetMergeList(MetadataTable mergeTable)
        {
            var mergeFileIds = new List <uint>();

            bool invalidEntriesEnabled = this.IsMergePolicyEnabled(MergePolicy.InvalidEntries);
            bool deletedEntriesEnabled = this.IsMergePolicyEnabled(MergePolicy.DeletedEntries);

            var mergeSet = new HashSet <uint>();

            foreach (var item in mergeTable.Table)
            {
                if (invalidEntriesEnabled && IsFileQualifiedForInvalidEntriesMergePolicy(item))
                {
                    mergeSet.Add(item.Key);
                }
                else if (deletedEntriesEnabled && IsFileQualifiedForDeletedEntriesMergePolicy(item))
                {
                    mergeSet.Add(item.Key);
                }
            }

            return(new List <uint>(mergeSet));
        }
Example No. 13
        public AsyncEnumerable(
            bool isValueAReferenceType,
            ReadMode readMode,
            IEnumerable <TKey> keyEnumerables,
            IReadableStoreComponent <TKey, TVersionedItem <TValue> > differentState,
            IReadableStoreComponent <TKey, TVersionedItem <TValue> > consolidatedState,
            MetadataTable currentMetadataTable,
            LoadValueCounter loadValueCounter,
            IStateSerializer <TValue> valueSerializer,
            string traceType)
        {
            this.traceType             = traceType;
            this.isValueAReferenceType = isValueAReferenceType;
            this.readMode             = readMode;
            this.keyEnumerable        = keyEnumerables;
            this.differentState       = differentState;
            this.consolidatedState    = consolidatedState;
            this.currentMetadataTable = currentMetadataTable;
            this.loadValueCounter     = loadValueCounter;
            this.valueSerializer      = valueSerializer;

            this.isInvalidated = false;
        }
Example No. 14
        public bool ShouldPerformGDPRMerge(MetadataTable mergeTable, out List <uint> mergeList)
        {
            var mergeSet = new HashSet <uint>();

            var  currentDateTime             = DateTime.UtcNow;
            var  cutOffTimeStamp             = currentDateTime.Subtract(TimeSpan.FromTicks(GDPRMergeThresholdInTicks)).Ticks;
            long latestDeletedEntryTimeStamp = FileMetadata.InvalidTimeStamp;

            if (this.IsMergePolicyEnabled(MergePolicy.GDPR))
            {
                foreach (var item in mergeTable.Table)
                {
                    var fileMetadata = item.Value;
                    if (fileMetadata.NumberOfDeletedEntries > 0)
                    {
                        if (fileMetadata.OldestDeletedEntryTimeStamp <= cutOffTimeStamp)
                        {
                            mergeSet.Add(item.Key);

                            // Selected checkpoint file could have other deleted entries eligible for removal
                            if (fileMetadata.LatestDeletedEntryTimeStamp > latestDeletedEntryTimeStamp)
                            {
                                latestDeletedEntryTimeStamp = fileMetadata.LatestDeletedEntryTimeStamp;
                            }
                        }
                    }

                    /*
                     * It is possible that, within the same differential set, a key is deleted and then
                     * added back again. In that case the deleted entry is not written to the checkpoint
                     * files; instead the corresponding entries in the old checkpoint file are invalidated.
                     * The following condition ensures those values in the old checkpoint are removed as
                     * well. Invalid entries currently do not move to newer checkpoint files on merge.
                     */
                    if (fileMetadata.NumberOfInvalidEntries > 0 && fileMetadata.TimeStamp <= cutOffTimeStamp)
                    {
                        mergeSet.Add(item.Key);
                    }
                }

                if (latestDeletedEntryTimeStamp != FileMetadata.InvalidTimeStamp)
                {
                    foreach (var item in mergeTable.Table)
                    {
                        var fileMetadata = item.Value;
                        if (fileMetadata.TimeStamp <= latestDeletedEntryTimeStamp)
                        {
                            mergeSet.Add(item.Key);
                        }
                    }
                }

                if (mergeSet.Count > 0)
                {
                    mergeList = new List <uint>(mergeSet);
                    return(true);
                }
            }

            mergeList = null;
            return(false);
        }
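
To make the timestamp arithmetic above concrete, an assumed walkthrough; the 30-day threshold and file ages are invented.

        // Illustrative walkthrough: with a 30-day GDPR threshold,
        // cutOffTimeStamp = now - 30 days.
        //   file A: oldest deleted entry 45 days old -> selected; its latest deleted entry is 32 days old
        //   file B: no deleted entries, 40 days old  -> selected by the second pass, because
        //           B.TimeStamp <= latestDeletedEntryTimeStamp (32 days ago)
        //   file C: 10 days old                      -> not selected
        // => mergeList = [A, B] and the method returns true.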
Example No. 15
        /// <summary>
        /// Returns the next copy operation.
        /// </summary>
        /// <param name="cancellationToken">Token used to signal cancellation.</param>
        /// <returns>The next copy operation data, or null when copy is complete.</returns>
        public async Task <OperationData> GetNextAsync(CancellationToken cancellationToken)
        {
            var directory = this.copyProvider.WorkingDirectory;

            // Take a snapshot of the metadata table on first Copy operation.
            if (this.copySnapshotOfMetadataTable == null)
            {
                this.copySnapshotOfMetadataTable = await this.copyProvider.GetMetadataTableAsync().ConfigureAwait(false);

                Diagnostics.Assert(
                    this.copySnapshotOfMetadataTable != null,
                    this.traceType,
                    "IStoreCopyProvider.GetMetadataTableAsync() returned a null metadata table.");

                this.copySnapshotOfMetadataTableEnumerator = this.copySnapshotOfMetadataTable.Table.GetEnumerator();
            }

            // Send the copy protocol version first.
            if (this.copyStage == CopyStage.Version)
            {
                FabricEvents.Events.CopyAsync(this.traceType, string.Format(System.Globalization.CultureInfo.InvariantCulture, "starting. directory:{0}", directory));

                // Next copy stage.
                this.copyStage = CopyStage.MetadataTable;

                using (var memoryStream = new MemoryStream(sizeof(int) + sizeof(byte)))
                    using (var writer = new BinaryWriter(memoryStream))
                    {
                        // Write the copy protocol version number.
                        writer.Write(CopyManager.CopyProtocolVersion);

                        // Write a byte indicating the operation type is the copy protocol version.
                        writer.Write((byte)TStoreCopyOperation.Version);

                        // Send the version operation data.
                        FabricEvents.Events.CopyAsync(
                            this.traceType,
                            string.Format(System.Globalization.CultureInfo.InvariantCulture, "Version. version:{0} bytes:{1}", CopyManager.CopyProtocolVersion, memoryStream.Position));
                        return(new OperationData(new ArraySegment <byte>(memoryStream.GetBuffer(), 0, checked ((int)memoryStream.Position))));
                    }
            }

            // Send the metadata table next.
            if (this.copyStage == CopyStage.MetadataTable)
            {
                // Consistency checks.
                Diagnostics.Assert(this.copySnapshotOfMetadataTable != null, this.traceType, "Unexpected copy error. Master table to be copied is null.");

                // Next copy stage.
                if (this.copySnapshotOfMetadataTableEnumerator.MoveNext())
                {
                    this.copyStage = CopyStage.KeyFile;
                }
                else
                {
                    this.copyStage = CopyStage.Complete;
                }

                using (var memoryStream = new MemoryStream())
                {
                    // Write the full metadata table (this will typically be small - even with 1000 tracked files, this will be under 64 KB).
                    await MetadataManager.WriteAsync(this.copySnapshotOfMetadataTable, memoryStream).ConfigureAwait(false);

                    using (var writer = new BinaryWriter(memoryStream))
                    {
                        // Write a byte indicating the operation type is the full metadata table.
                        writer.Write((byte)TStoreCopyOperation.MetadataTable);

                        // Send the metadata table operation data.
                        FabricEvents.Events.CopyAsync(
                            this.traceType,
                            string.Format(System.Globalization.CultureInfo.InvariantCulture, "Metadata table. directory:{0} bytes:{1} Total number of checkpoint files:{2}", directory, memoryStream.Position, this.copySnapshotOfMetadataTable.Table.Count));
                        return(new OperationData(new ArraySegment <byte>(memoryStream.GetBuffer(), 0, checked ((int)memoryStream.Position))));
                    }
                }
            }

            // Send the key file.
            if (this.copyStage == CopyStage.KeyFile)
            {
                var bytesRead = 0;

                var shortFileName     = this.copySnapshotOfMetadataTableEnumerator.Current.Value.FileName;
                var dataFileName      = Path.Combine(directory, shortFileName);
                var keyCheckpointFile = dataFileName + KeyCheckpointFile.FileExtension;

                // If we don't have the current file stream opened, this is the first chunk of the key checkpoint file.
                if (this.currentFileStream == null)
                {
                    Diagnostics.Assert(
                        FabricFile.Exists(keyCheckpointFile),
                        this.traceType,
                        "Unexpected copy error. Expected file does not exist: {0}",
                        keyCheckpointFile);

                    // Open the file with the asynchronous flag and a 4096-byte buffer size (the C# default).
                    FabricEvents.Events.CopyAsync(this.traceType, string.Format(System.Globalization.CultureInfo.InvariantCulture, "Opening key file. file:{0}", shortFileName));

                    // MCoskun: Default IoPriorityHint is used.
                    // Reason: We do not know whether this copy might be used to build a replica that is or will be required for write quorum.
                    this.currentFileStream = FabricFile.Open(keyCheckpointFile, FileMode.Open, FileAccess.Read, FileShare.Read, 4096, FileOptions.Asynchronous);

                    // Send the start of file operation data.
                    this.perfCounterWriter.StartMeasurement();
                    bytesRead = await this.currentFileStream.ReadAsync(this.copyDataBuffer, 0, CopyChunkSize, cancellationToken).ConfigureAwait(false);

                    this.perfCounterWriter.StopMeasurement(bytesRead);

                    using (var stream = new MemoryStream(this.copyDataBuffer, writable: true))
                        using (var writer = new BinaryWriter(stream))
                        {
                            stream.Position = bytesRead;
                            writer.Write((int)this.copySnapshotOfMetadataTableEnumerator.Current.Value.FileId);
                        }

                    this.copyDataBuffer[bytesRead + sizeof(int)] = (byte)TStoreCopyOperation.StartKeyFile;
                    FabricEvents.Events.CopyAsync(
                        this.traceType,
                        string.Format(System.Globalization.CultureInfo.InvariantCulture, "StartKeyFile. file:{0} bytes:{1} totalFileSize:{2}", shortFileName, bytesRead + sizeof(int) + 1, this.currentFileStream.Length));
                    return(new OperationData(new ArraySegment <byte>(this.copyDataBuffer, 0, bytesRead + sizeof(int) + 1)));
                }

                // The start of the current file has been sent.  Check if there are more chunks to be sent (if the stream is at the end, this will return zero).
                this.perfCounterWriter.StartMeasurement();
                bytesRead = await this.currentFileStream.ReadAsync(this.copyDataBuffer, 0, CopyChunkSize, cancellationToken).ConfigureAwait(false);

                this.perfCounterWriter.StopMeasurement(bytesRead);
                if (bytesRead > 0)
                {
                    // Send the partial table file operation data.
                    this.copyDataBuffer[bytesRead] = (byte)TStoreCopyOperation.WriteKeyFile;
                    FabricEvents.Events.CopyAsync(this.traceType, string.Format(System.Globalization.CultureInfo.InvariantCulture, "WriteKeyFile. level:{0} bytes:{1} Position:{2}", shortFileName, bytesRead + 1, this.currentFileStream.Position));
                    return(new OperationData(new ArraySegment <byte>(this.copyDataBuffer, 0, bytesRead + 1)));
                }

                // There is no more data in the current file.  Send the end of file marker, and prepare for the next copy stage.
                this.currentFileStream.Dispose();
                this.currentFileStream = null;

                // Now move to the value file.
                this.copyStage = CopyStage.ValueFile;

                // Send the end of file operation data.
                var endFileData = new ArraySegment <byte>(new[] { (byte)TStoreCopyOperation.EndKeyFile });
                FabricEvents.Events.CopyAsync(this.traceType, string.Format(System.Globalization.CultureInfo.InvariantCulture, "EndKeyFile. file:{0}", shortFileName));
                return(new OperationData(endFileData));
            }

            if (this.copyStage == CopyStage.ValueFile)
            {
                var bytesRead = 0;

                var shortFileName       = this.copySnapshotOfMetadataTableEnumerator.Current.Value.FileName;
                var dataFileName        = Path.Combine(directory, shortFileName);
                var valueCheckpointFile = dataFileName + ValueCheckpointFile.FileExtension;

                // If we don't have the current file stream opened, this is the first chunk of the value checkpoint file.
                if (this.currentFileStream == null)
                {
                    Diagnostics.Assert(
                        FabricFile.Exists(valueCheckpointFile),
                        this.traceType,
                        "Unexpected copy error. Expected file does not exist: {0}",
                        valueCheckpointFile);

                    // Open the file with the asynchronous flag and a 4096-byte buffer size (the C# default).
                    FabricEvents.Events.CopyAsync(this.traceType, string.Format(System.Globalization.CultureInfo.InvariantCulture, "Opening value file. directory:{0} file:{1}", directory, shortFileName));

                    // MCoskun: Default IoPriorityHint is used.
                    // Reason: We do not know whether this copy might be used to build a replica that is or will be required for write quorum.
                    this.currentFileStream = FabricFile.Open(
                        valueCheckpointFile,
                        FileMode.Open,
                        FileAccess.Read,
                        FileShare.Read,
                        4096,
                        FileOptions.Asynchronous);

                    // Send the start of file operation data.
                    this.perfCounterWriter.StartMeasurement();
                    bytesRead = await this.currentFileStream.ReadAsync(this.copyDataBuffer, 0, CopyChunkSize, cancellationToken).ConfigureAwait(false);

                    this.perfCounterWriter.StopMeasurement(bytesRead);

                    using (var stream = new MemoryStream(this.copyDataBuffer, writable: true))
                        using (var writer = new BinaryWriter(stream))
                        {
                            stream.Position = bytesRead;
                            writer.Write((int)this.copySnapshotOfMetadataTableEnumerator.Current.Value.FileId);
                        }

                    this.copyDataBuffer[bytesRead + sizeof(int)] = (byte)TStoreCopyOperation.StartValueFile;
                    FabricEvents.Events.CopyAsync(
                        this.traceType,
                        string.Format(System.Globalization.CultureInfo.InvariantCulture, "StartValueFile. file:{0} bytes:{1} totalFileSize:{2}", shortFileName, bytesRead + sizeof(int) + 1, this.currentFileStream.Length));
                    return(new OperationData(new ArraySegment <byte>(this.copyDataBuffer, 0, bytesRead + sizeof(int) + 1)));
                }

                // The start of the current file was sent.  Check if there are more chunks to be sent (if the stream is at the end, this will return zero).
                this.perfCounterWriter.StartMeasurement();
                bytesRead = await this.currentFileStream.ReadAsync(this.copyDataBuffer, 0, CopyChunkSize, cancellationToken).ConfigureAwait(false);

                this.perfCounterWriter.StopMeasurement(bytesRead);

                if (bytesRead > 0)
                {
                    this.copyDataBuffer[bytesRead] = (byte)TStoreCopyOperation.WriteValueFile;
                    FabricEvents.Events.CopyAsync(this.traceType, string.Format(System.Globalization.CultureInfo.InvariantCulture, "WriteValueFile. file:{0} bytes:{1} Position:{2}", shortFileName, bytesRead + 1, this.currentFileStream.Position));
                    return(new OperationData(new ArraySegment <byte>(this.copyDataBuffer, 0, bytesRead + 1)));
                }

                // There is no more data in the current file.  Send the end of file marker, and prepare for the next copy stage.
                this.currentFileStream.Dispose();
                this.currentFileStream = null;

                // Check if there are more files.
                if (this.copySnapshotOfMetadataTableEnumerator.MoveNext())
                {
                    // More files.
                    this.copyStage = CopyStage.KeyFile;
                }
                else
                {
                    // No more files to be sent.
                    this.copyStage = CopyStage.Complete;
                }

                // Send the end of file operation data.
                var endFileData = new ArraySegment <byte>(new[] { (byte)TStoreCopyOperation.EndValueFile });
                FabricEvents.Events.CopyAsync(this.traceType, string.Format(System.Globalization.CultureInfo.InvariantCulture, "EndValueFile. file:{0}", shortFileName));
                return(new OperationData(endFileData));
            }

            // Finally, send the "copy completed" marker.
            if (this.copyStage == CopyStage.Complete)
            {
                this.perfCounterWriter.UpdatePerformanceCounters();

                // Next copy stage.
                this.copyStage = CopyStage.None;

                // Indicate the copy operation is complete.
                FabricEvents.Events.CopyAsync(
                    this.traceType,
                    string.Format(
                        System.Globalization.CultureInfo.InvariantCulture,
                        "completed. directory:{0}, diskread:{1}bytes/sec",
                        directory,
                        this.perfCounterWriter.AvgDiskTransferBytesPerSec));
                var copyCompleteData = new ArraySegment <byte>(new[] { (byte)TStoreCopyOperation.Complete });
                return(new OperationData(copyCompleteData));
            }

            // Finished copying.  Dispose immediately to release resources/locks.
            if (!this.disposed)
            {
                ((IDisposable)this).Dispose();
            }

            return(null);
        }
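
A minimal sketch of the consuming side: a hypothetical copyOperations instance exposing GetNextAsync is pumped until it returns null, which is how the example above signals that copy has finished.

        // Hypothetical consumer loop (sketch); "copyOperations" exposes the GetNextAsync shown above.
        OperationData operationData;
        while ((operationData = await copyOperations.GetNextAsync(CancellationToken.None).ConfigureAwait(false)) != null)
        {
            // Each OperationData carries one copy protocol step: Version, MetadataTable,
            // StartKeyFile/WriteKeyFile/EndKeyFile, StartValueFile/WriteValueFile/EndValueFile,
            // and finally Complete. The receiver dispatches on the trailing operation-type byte.
        }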
Example No. 16
        public override Task <TValue> GetValueAsync(MetadataTable metadataTable, IStateSerializer <TValue> valueSerializer, bool isValueAReferenceType, ReadMode readMode, LoadValueCounter valueCounter, CancellationToken cancellationToken, string traceType, bool duringRecovery = false)
        {
            throw new InvalidOperationException(SR.Error_TDeletedItem_NoValue);
        }