        public async Task<string> FinishUploadAsync(UploadJob job, UploadItem item)
        {
            var checksum = TreeHashGenerator.CalculateTreeHash(job.ChunkChecksums);

            // prepare the completion request
            var request = new CompleteMultipartUploadRequest
            {
                UploadId    = job.UploadId,
                ArchiveSize = item.ContentLength.ToString(),
                Checksum    = checksum,
                VaultName   = job.VaultName
            };

            // finish the multipart upload
            var response = await _client.CompleteMultipartUploadAsync(request);

            var archiveId = response.ArchiveId;

            return archiveId;
        }
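
CalculateTreeHash here folds the per-chunk checksums into Glacier's documented tree hash: adjacent SHA-256 values are paired, each concatenated pair is hashed, and the process repeats until a single root remains. A minimal sketch of that folding, assuming hex-encoded chunk checksums (CombineChunkChecksums is a hypothetical name; Amazon.Glacier's TreeHashGenerator is the real implementation):

using System;
using System.Collections.Generic;
using System.Linq;
using System.Security.Cryptography;

internal static class TreeHashSketch
{
    // Hypothetical stand-in for TreeHashGenerator.CalculateTreeHash(IEnumerable<string>):
    // fold hex-encoded SHA-256 part checksums pairwise into the tree-hash root.
    public static string CombineChunkChecksums(IEnumerable<string> hexChecksums)
    {
        var level = hexChecksums.Select(HexToBytes).ToList();
        using (var sha256 = SHA256.Create())
        {
            while (level.Count > 1)
            {
                var next = new List<byte[]>();
                for (var i = 0; i < level.Count; i += 2)
                {
                    if (i + 1 < level.Count)
                        next.Add(sha256.ComputeHash(level[i].Concat(level[i + 1]).ToArray()));
                    else
                        next.Add(level[i]); // an odd trailing node is promoted unchanged
                }
                level = next;
            }
            return BitConverter.ToString(level[0]).Replace("-", "").ToLowerInvariant();
        }
    }

    private static byte[] HexToBytes(string hex)
    {
        var bytes = new byte[hex.Length / 2];
        for (var i = 0; i < bytes.Length; i++)
            bytes[i] = Convert.ToByte(hex.Substring(i * 2, 2), 16);
        return bytes;
    }
}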
Example #2
        private async Task<string> UploadArchiveAsync(Stream stream, string archiveDescription)
        {
            await TestConnectionAsync();

            var streamSize = new Size(stream.Length, SizeUnit.Bytes);

            if (streamSize > TotalArchiveSizeLimit)
            {
                throw new InvalidOperationException($"Can't upload more than {TotalArchiveSizeLimit} to AWS Glacier, current upload size: {streamSize}");
            }

            var streamLength = streamSize.GetValue(SizeUnit.Bytes);

            try
            {
                _progress?.UploadProgress.SetTotal(streamLength);

                if (streamSize > MaxUploadArchiveSize)
                {
                    var partSize = GetPartSize(streamLength);

                    _progress?.UploadProgress.ChangeType(UploadType.Chunked);

                    var initiateResponse = await _client.InitiateMultipartUploadAsync(new InitiateMultipartUploadRequest
                    {
                        ArchiveDescription = archiveDescription,
                        VaultName          = _vaultName,
                        AccountId          = "-",
                        PartSize           = partSize
                    }, _cancellationToken);

                    var partChecksums = new List<string>();

                    var currentPosition = 0L;
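                    // upload the stream part by part, collecting a tree hash per part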
                    while (stream.Position < streamLength)
                    {
                        var partStream   = GlacierUtils.CreatePartStream(stream, partSize);
                        var partChecksum = TreeHashGenerator.CalculateTreeHash(partStream);

                        partChecksums.Add(partChecksum);

                        var uploadRequest = new UploadMultipartPartRequest
                        {
                            UploadId  = initiateResponse.UploadId,
                            VaultName = _vaultName,
                            AccountId = "-",
                            Body      = partStream,
                            StreamTransferProgress = (_, args) =>
                            {
                                _progress?.UploadProgress.ChangeState(UploadState.Uploading);
                                _progress?.UploadProgress.UpdateUploaded(args.IncrementTransferred);
                                _progress?.OnUploadProgress?.Invoke();
                            },
                            Checksum = partChecksum
                        };

                        uploadRequest.SetRange(currentPosition, currentPosition + partStream.Length - 1);

                        await _client.UploadMultipartPartAsync(uploadRequest, _cancellationToken);

                        currentPosition += partStream.Length;
                    }

                    var completeResponse = await _client.CompleteMultipartUploadAsync(new CompleteMultipartUploadRequest
                    {
                        AccountId   = "-",
                        VaultName   = _vaultName,
                        ArchiveSize = streamLength.ToString(),
                        UploadId    = initiateResponse.UploadId,
                        Checksum    = TreeHashGenerator.CalculateTreeHash(partChecksums)
                    }, _cancellationToken);

                    return completeResponse.ArchiveId;
                }
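
                // archives at or below MaxUploadArchiveSize go up in a single UploadArchive call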

                var response = await _client.UploadArchiveAsync(new UploadArchiveRequest
                {
                    AccountId          = "-",
                    ArchiveDescription = archiveDescription,
                    Body               = stream,
                    VaultName          = _vaultName,
                    StreamTransferProgress = (_, args) =>
                    {
                        _progress?.UploadProgress.ChangeState(UploadState.Uploading);
                        _progress?.UploadProgress.UpdateUploaded(args.IncrementTransferred);
                        _progress?.OnUploadProgress?.Invoke();
                    },
                    Checksum = TreeHashGenerator.CalculateTreeHash(stream)
                }, _cancellationToken);

                return response.ArchiveId;
            }
            finally
            {
                _progress?.UploadProgress.ChangeState(UploadState.Done);
            }
        }
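
GetPartSize isn't shown in this example. Glacier multipart uploads take a power-of-two part size between 1 MiB and 4 GiB and allow at most 10,000 parts per upload, so a plausible sketch looks like the following (the limits come from the Glacier documentation; the implementation itself is an assumption):

        private const long MinPartSize = 1L * 1024 * 1024;        // 1 MiB, Glacier's minimum
        private const long MaxPartSize = 4L * 1024 * 1024 * 1024; // 4 GiB, Glacier's maximum
        private const int MaxParts = 10_000;                      // per-upload part limit

        // Hypothetical implementation: pick the smallest power-of-two part size
        // that keeps the whole archive within the 10,000-part limit.
        private static long GetPartSize(long totalLength)
        {
            var partSize = MinPartSize;
            while (partSize < MaxPartSize && totalLength > partSize * MaxParts)
                partSize *= 2;
            return partSize;
        }

Starting from the minimum keeps the per-part buffers as small as the part limit allows.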
        private static void HandleUpload(string[] args)
        {
            if (args.Length < 6)
            {
                Console.WriteLine("args should be aws_key aws_secret region vault_name description filename");
                return;
            }

            var region = RegionEndpoint.EnumerableAllRegions.SingleOrDefault(reg => reg.SystemName == args[2]);

            if (region == null)
            {
                Console.WriteLine($"Unknown region: {args[2]}");
                return;
            }
            var aws_key     = args[0];
            var aws_secret  = args[1];
            var vault_name  = args[3];
            var description = args[4];
            var filename    = args[5];
            var creds       = new BasicAWSCredentials(aws_key, aws_secret);
            var config      = new AmazonGlacierConfig
            {
                RegionEndpoint = region,
                Timeout        = TimeSpan.FromDays(10)
            };
            var client    = new AmazonGlacierClient(creds, config);
            var initReq   = new InitiateMultipartUploadRequest(vault_name, description, PartSize);
            var ts        = new CancellationTokenSource();
            var completed = 0;
            var started   = 0;

            try
            {
                var res      = client.InitiateMultipartUploadAsync(initReq, ts.Token).Result;
                var promises = new List<Task<UploadMultipartPartResponse>>();
                Task<UploadMultipartPartResponse> lastPart = null;
                var  sem        = new SemaphoreSlim(ConcurrencyLimit);
                long totalSize  = 0;
                int  totalParts = 0;
                using (var fs = new FileStream(filename, FileMode.Open))
                {
                    totalSize = fs.Length;
                    Console.WriteLine($"Preparing to upload {ByteSize.FromBytes(totalSize)}");
                    totalParts = (int)((fs.Length + PartSize - 1) / PartSize);
                    bool noErrors = true;
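                    // keep at most ConcurrencyLimit part uploads in flight at a time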
                    while (noErrors)
                    {
                        sem.Wait();
                        var arr     = new byte[PartSize];
                        var start   = fs.Position;
                        var read    = fs.Read(arr, 0, (int)PartSize);
                        var check   = TreeHasher.ComputeArrayHashString(arr, read);
                        var partReq = new UploadMultipartPartRequest(vault_name,
                                                                     res.UploadId,
                                                                     check,
                                                                     $"bytes {start}-{start + read - 1}/*",
                                                                     new MemoryStream(arr, 0, read));
                        var promise = client.UploadMultipartPartAsync(partReq, ts.Token);
                        Interlocked.Increment(ref started);
                        Console.WriteLine($"Started {started} out of {totalParts}");
                        promise.ContinueWith(tsk =>
                        {
                            if (tsk.IsFaulted)
                            {
                                Console.WriteLine($"Exception encountered: {tsk.Exception}");
                                noErrors = false;
                            }
                            else
                            {
                                Interlocked.Increment(ref completed);
                                Console.WriteLine($"{completed} out of {totalParts} completed.");
                            }
                            sem.Release();
                        });
                        promises.Add(promise);
                        if (read < PartSize || fs.Position >= fs.Length)
                        {
                            lastPart = promise;
                            break;
                        }
                    }
                }

                Task.WaitAll(promises.ToArray());
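                // re-read the file to compute the full tree hash Glacier expects at completion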
                using (var fs = new FileStream(filename, FileMode.Open))
                {
                    var check    = TreeHasher.ComputeHashString(fs);
                    var finisher = new CompleteMultipartUploadRequest(vault_name, res.UploadId, totalSize.ToString(), check);
                    Console.WriteLine("Finishing up");
                    Console.WriteLine($"Computed checksum {check}");
                    var result = client.CompleteMultipartUploadAsync(finisher, ts.Token).Result;
                    Console.WriteLine($"Completed: {result.Checksum}");
                    Console.WriteLine($"Calculated: {check}");
                    var match = string.Equals(result.Checksum, check, StringComparison.OrdinalIgnoreCase) ? "" : "not ";
                    Console.WriteLine($"Checksums do {match}match.");
                    Console.WriteLine($"Archive ID: {result.ArchiveId} Location: {result.Location}");
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine($"Exception thrown: {ex.GetType().Name} - {ex.Message}");
                Console.WriteLine($"Full exception: {ex.ToString()}");
            }
        }
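
One thing HandleUpload never does is clean up after a failure: the parts of an unfinished multipart upload keep occupying the vault until the upload is aborted. A hedged sketch of that cleanup using the SDK's AbortMultipartUploadAsync (calling it from the catch block above is an assumption, not part of the original example):

        // Hypothetical helper: abort an unfinished multipart upload so its
        // parts don't linger in the vault after an error.
        private static async Task AbortUploadAsync(AmazonGlacierClient client,
                                                   string vaultName,
                                                   string uploadId,
                                                   CancellationToken token)
        {
            try
            {
                await client.AbortMultipartUploadAsync(
                    new AbortMultipartUploadRequest(vaultName, uploadId), token);
                Console.WriteLine($"Aborted upload {uploadId}");
            }
            catch (Exception ex)
            {
                // best effort: report the failure and move on
                Console.WriteLine($"Abort failed: {ex.Message}");
            }
        }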