        private async Task<string> MultiPartUpload(string archiveDescription, Stream stream)
        {
            var streamLength = stream.Length;

            if (streamLength > MultiPartUploadLimitInBytes)
            {
                throw new InvalidOperationException(@"Can't upload more than 40TB to Amazon Glacier, " +
                                                    $"current upload size: {new Size(streamLength).HumaneSize}");
            }

            UploadProgress?.SetTotal(streamLength);
            UploadProgress?.ChangeType(UploadType.Chunked);

            // using a chunked upload we can upload up to 10,000 chunks, 4GB max each
            // we limit every chunk to a minimum of 128MB
            // constraint: the part size must be a megabyte (1024KB) multiplied by a power of 2,
            // for example 1048576 (1MB), 2097152 (2MB), 4194304 (4MB), 8388608 (8MB), and so on.
            // the minimum allowable part size is 1MB and the maximum is 4GB (4096MB).
            var maxLengthPerPart = Math.Max(MinOnePartUploadSizeLimitInBytes, stream.Length / 10000);
            const long maxPartLength = 4L * 1024 * 1024 * 1024; // 4GB
            var lengthPerPartPowerOf2 = Math.Min(GetNextPowerOf2(maxLengthPerPart), maxPartLength);

            var baseUrl = $"{GetUrl()}/multipart-uploads";
            var uploadId = await GetUploadId(baseUrl, archiveDescription, lengthPerPartPowerOf2);

            var client = GetClient(TimeSpan.FromDays(7));

            var uploadUrl = $"{baseUrl}/{uploadId}";
            var fullStreamPayloadTreeHash = RavenAwsHelper.CalculatePayloadTreeHash(stream);

            try
            {
                while (stream.Position < streamLength)
                {
                    var length = Math.Min(lengthPerPartPowerOf2, streamLength - stream.Position);
                    await UploadPart(stream, client, uploadUrl, length, retryCount: 0);
                }

                return await CompleteMultiUpload(uploadUrl, client, streamLength, fullStreamPayloadTreeHash);
            }
            catch (Exception)
            {
                await AbortMultiUpload(uploadUrl, client);

                throw;
            }
            finally
            {
                UploadProgress?.ChangeState(UploadState.Done);
            }
        }
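
The GetNextPowerOf2 helper called above is not part of this snippet. A minimal sketch, assuming it simply rounds the requested size up to the next power of two (which, for inputs of at least 1MB, keeps the result at 1MB multiplied by a power of two, as the Glacier constraint above requires):

        // Hypothetical sketch; the real GetNextPowerOf2 is not shown in this example.
        private static long GetNextPowerOf2(long number)
        {
            // already a power of two: nothing to round up
            if (number > 0 && (number & (number - 1)) == 0)
                return number;

            long result = 1;
            while (result < number)
                result <<= 1; // double until we reach or exceed the input

            return result;
        }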
Example #2
        private async Task MultiPartUpload(string key, Stream stream, Dictionary<string, string> metadata)
        {
            var streamLength = stream.Length;

            if (streamLength > MultiPartUploadLimitInBytes)
            {
                throw new InvalidOperationException(@"Can't upload more than 5TB to Amazon S3, " +
                                                    $"current upload size: {new Size(streamLength).HumaneSize}");
            }

            UploadProgress?.SetTotal(streamLength);
            UploadProgress?.ChangeType(UploadType.Chunked);

            var baseUrl = $"{GetUrl()}/{key}";
            var uploadId = await GetUploadId(baseUrl, metadata);

            var client = GetClient(TimeSpan.FromDays(7));
            var partNumbersWithEtag = new List<Tuple<int, string>>();
            var partNumber = 0;
            var completeUploadUrl = $"{baseUrl}?uploadId={uploadId}";

            // using a chunked upload, S3 allows up to 10,000 chunks of 5GB max each;
            // we cap ourselves at 1,000 chunks and limit every chunk to a minimum of 100MB
            var maxLengthPerPart = Math.Max(MinOnePartUploadSizeLimitInBytes, stream.Length / 1000);

            try
            {
                while (stream.Position < streamLength)
                {
                    var length = Math.Min(maxLengthPerPart, streamLength - stream.Position);
                    var url = $"{baseUrl}?partNumber={++partNumber}&uploadId={uploadId}";

                    var etag = await UploadPart(stream, client, url, length, retryCount: 0);

                    partNumbersWithEtag.Add(new Tuple<int, string>(partNumber, etag));
                }

                await CompleteMultiUpload(completeUploadUrl, client, partNumbersWithEtag);
            }
            catch (Exception)
            {
                await AbortMultiUpload(client, completeUploadUrl);

                throw;
            }
            finally
            {
                UploadProgress?.ChangeState(UploadState.Done);
            }
        }
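
CompleteMultiUpload is likewise not shown. As a sketch of what it plausibly does, assuming a plain HTTP client: S3 finalizes a multipart upload with a POST to the uploadId URL whose XML body pairs each part number with the ETag returned when that part was uploaded. The method name and the bare PostAsync call are illustrative; the real request would also be signed (AWS Signature Version 4).

        // Hypothetical sketch; the real CompleteMultiUpload is not shown in this example.
        // Requires: using System.Text; using System.Net.Http;
        private static async Task CompleteMultiUploadSketch(string url, HttpClient client,
                                                            List<Tuple<int, string>> partNumbersWithEtag)
        {
            // S3 expects every part number paired with the ETag that UploadPart returned
            var body = new StringBuilder("<CompleteMultipartUpload>");
            foreach (var part in partNumbersWithEtag)
            {
                body.Append("<Part><PartNumber>").Append(part.Item1)
                    .Append("</PartNumber><ETag>").Append(part.Item2)
                    .Append("</ETag></Part>");
            }
            body.Append("</CompleteMultipartUpload>");

            // the real request must also carry AWS authentication headers
            var response = await client.PostAsync(url, new StringContent(body.ToString(), Encoding.UTF8, "text/xml"));
            response.EnsureSuccessStatusCode();
        }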
Example #3
        private async Task PutBlockApi(string key, Stream stream, Dictionary<string, string> metadata)
        {
            var streamLength = stream.Length;

            if (streamLength > TotalBlocksSizeLimitInBytes)
            {
                throw new InvalidOperationException(@"Can't upload more than 4.75TB to Azure, " +
                                                    $"current upload size: {new Size(streamLength).HumaneSize}");
            }

            var blockNumber = 0;
            var blockIds = new List<string>();
            var baseUrl = _serverUrlForContainer + "/" + key;
            var client = GetClient(TimeSpan.FromDays(7));

            UploadProgress?.SetTotal(streamLength);
            UploadProgress?.ChangeType(UploadType.Chunked);

            try
            {
                while (stream.Position < streamLength)
                {
                    var blockNumberInBytes = BitConverter.GetBytes(blockNumber++);
                    var blockIdString = Convert.ToBase64String(blockNumberInBytes);
                    blockIds.Add(blockIdString);

                    var length = Math.Min(OnePutBlockSizeLimitInBytes, streamLength - stream.Position);
                    var baseUrlForUpload = baseUrl + "?comp=block&blockid=";
                    var url = baseUrlForUpload + WebUtility.UrlEncode(blockIdString);

                    await PutBlock(stream, client, url, length, retryCount: 0);
                }

                // put block list
                await PutBlockList(baseUrl, client, blockIds, metadata);
            }
            finally
            {
                UploadProgress?.ChangeState(UploadState.Done);
            }
        }
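
PutBlockList is also external to this snippet. A sketch of the underlying Azure REST call, under the same assumptions: committing the blob is a PUT to "?comp=blocklist" whose XML body lists the block IDs in commit order, with user metadata sent as x-ms-meta-* headers. The method name and the bare SendAsync call are illustrative; real code also needs authentication (a SharedKey signature or SAS token). Azure eventually discards blocks that are uploaded but never committed, which is why this method, unlike the Glacier and S3 variants above, has no explicit abort call in its error path.

        // Hypothetical sketch; the real PutBlockList is not shown in this example.
        // Requires: using System.Text; using System.Net.Http;
        private static async Task PutBlockListSketch(string baseUrl, HttpClient client,
                                                     List<string> blockIds, Dictionary<string, string> metadata)
        {
            // list every block ID, in order, that should make up the committed blob
            var body = new StringBuilder("<?xml version=\"1.0\" encoding=\"utf-8\"?><BlockList>");
            foreach (var blockId in blockIds)
                body.Append("<Latest>").Append(blockId).Append("</Latest>");
            body.Append("</BlockList>");

            var request = new HttpRequestMessage(HttpMethod.Put, baseUrl + "?comp=blocklist")
            {
                Content = new StringContent(body.ToString(), Encoding.UTF8, "text/xml")
            };

            // user-defined metadata travels as x-ms-meta-* headers
            foreach (var kvp in metadata)
                request.Headers.Add("x-ms-meta-" + kvp.Key, kvp.Value);

            // the real request must also carry authentication headers
            var response = await client.SendAsync(request);
            response.EnsureSuccessStatusCode();
        }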