Example #1
            private void HandleBatchOfDocumentsIfNecessary()
            {
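                // Two reasons to flush: the previous batch has finished and we have
                // accumulated at least 1 MB (keep the merger busy), or allocated
                // memory has crossed the enqueue threshold (we must flush now).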
                var prevDoneAndHasEnough = _command.Context.AllocatedMemory > Constants.Size.Megabyte && _prevCommandTask.IsCompleted;
                var currentReachedLimit  = _command.Context.AllocatedMemory > _enqueueThreshold.GetValue(SizeUnit.Bytes);

                if (currentReachedLimit == false && prevDoneAndHasEnough == false)
                {
                    return;
                }

                var prevCommand     = _prevCommand;
                var prevCommandTask = _prevCommandTask;

                // Enqueue the new command before waiting on the previous one,
                // for the same reason as in HandleBatchOfRevisionsIfNecessary below.
                _prevCommand     = _command;
                _prevCommandTask = _database.TxMerger.Enqueue(_command).AsTask();

                if (prevCommand != null)
                {
                    using (prevCommand)
                    {
                        prevCommandTask.Wait();
                        Debug.Assert(prevCommand.IsDisposed == false,
                                     "the command must not be disposed yet; we dispose it here so its context can be reused on the next batch");
                    }
                }

                _command = new MergedBatchPutCommand(_database, _buildType, _log)
                {
                    IsRevision = _isRevision
                };
            }

            private void HandleBatchOfRevisionsIfNecessary()
            {
                var prevDoneAndHasEnough = _revisionDeleteCommand.Context.AllocatedMemory > Constants.Size.Megabyte && _prevRevisionCommandTask.IsCompleted;
                var currentReachedLimit  = _revisionDeleteCommand.Context.AllocatedMemory > _enqueueThreshold.GetValue(SizeUnit.Bytes);

                if (currentReachedLimit == false && prevDoneAndHasEnough == false)
                {
                    return;
                }

                var prevCommand     = _prevRevisionDeleteCommand;
                var prevCommandTask = _prevRevisionCommandTask;
                var commandTask     = _database.TxMerger.Enqueue(_revisionDeleteCommand);

                // Enqueue the new command first: if the enqueue itself fails,
                // we must not end up waiting on the previous one.
                _prevRevisionDeleteCommand = _revisionDeleteCommand;
                _prevRevisionCommandTask   = commandTask;

                if (prevCommand != null)
                {
                    using (prevCommand)
                    {
                        prevCommandTask.GetAwaiter().GetResult();
                        Debug.Assert(prevCommand.IsDisposed == false,
                                     "the command must not be disposed yet; we dispose it here so its context can be reused on the next batch");
                    }
                }

                _revisionDeleteCommand = new MergedBatchDeleteRevisionCommand(_database, _log);
            }
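Both methods follow the same pipelining pattern: enqueue the current batch, then dispose of and wait on the previous batch while the transaction merger processes the new one. A minimal sketch of that pattern, with BatchCommand and ProcessAsync as hypothetical stand-ins for MergedBatchPutCommand and TxMerger.Enqueue:

    using System;
    using System.Threading.Tasks;

    class Pipeline
    {
        private BatchCommand _prev;
        private Task _prevTask;

        public void Submit(BatchCommand current)
        {
            // Enqueue first; if this throws, we never start waiting on _prevTask.
            var prevCommand = _prev;
            var prevTask    = _prevTask;
            _prev     = current;
            _prevTask = ProcessAsync(current);

            // Overlap: while 'current' is being processed, finish up the previous batch.
            if (prevCommand != null)
            {
                using (prevCommand)
                {
                    prevTask.GetAwaiter().GetResult();
                }
            }
        }

        private static Task ProcessAsync(BatchCommand cmd) => Task.Run(cmd.Execute);
    }

    class BatchCommand : IDisposable
    {
        public void Execute() { /* apply the batch */ }
        public void Dispose() { /* return the pooled context */ }
    }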
Example #3
        public void PutBlob(string blobName, Stream stream, Dictionary<string, string> metadata)
        {
            AsyncHelpers.RunSync(TestConnectionAsync);

            var streamSize = new Size(stream.Length, SizeUnit.Bytes);

            // Azure block blobs allow at most 50,000 committed blocks; with the
            // classic 100 MB block size that caps an upload at ~4.75 TB.
            if (streamSize > TotalBlocksSizeLimit)
            {
                throw new InvalidOperationException(@"Can't upload more than 4.75TB to Azure, " +
                                                    $"current upload size: {streamSize}");
            }

            var streamLength = streamSize.GetValue(SizeUnit.Bytes);

            try
            {
                _progress?.UploadProgress.SetTotal(streamLength);

                var maxSingleBlockSizeInBytes = MaxSingleBlockSize.GetValue(SizeUnit.Bytes);
                if (streamLength > maxSingleBlockSizeInBytes)
                {
                    _progress?.UploadProgress.ChangeType(UploadType.Chunked);
                }

                var client = _client.GetBlobClient(blobName);

                client.Upload(stream, metadata: metadata, progressHandler: this, transferOptions: new StorageTransferOptions
                {
                    MaximumTransferSize = maxSingleBlockSizeInBytes
                }, cancellationToken: _cancellationToken);
            }
            finally
            {
                _progress?.UploadProgress.ChangeState(UploadState.Done);
            }
        }
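PutBlob passes 'this' as the progressHandler argument, which implies the containing class implements IProgress&lt;long&gt;; the Azure SDK reports the running total of bytes transferred through it, which the class presumably funnels into its UploadProgress. A minimal sketch of such a receiver, assuming a simple console tracker rather than RavenDB's progress types:

    using System;

    class UploadProgressReporter : IProgress<long>
    {
        private long _lastReported;

        // Azure.Storage.Blobs reports the *cumulative* byte count, so any
        // per-callback delta has to be derived by the receiver.
        public void Report(long bytesTransferred)
        {
            var delta = bytesTransferred - _lastReported;
            _lastReported = bytesTransferred;
            Console.WriteLine($"uploaded {delta:N0} bytes ({bytesTransferred:N0} total)");
        }
    }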
Example #4
        public async Task PutObjectAsync(string key, Stream stream, Dictionary<string, string> metadata)
        {
            //TestConnection();

            var streamSize = new Size(stream.Length, SizeUnit.Bytes);

            // S3's hard ceiling for a single object is 5 TB.
            if (streamSize > TotalBlocksSizeLimit)
            {
                throw new InvalidOperationException($@"Can't upload more than 5TB to AWS S3, current upload size: {streamSize}");
            }

            var streamLength = streamSize.GetValue(SizeUnit.Bytes);

            try
            {
                _progress?.UploadProgress.SetTotal(streamLength);

                if (streamSize > MaxUploadPutObject)
                {
                    _progress?.UploadProgress.ChangeType(UploadType.Chunked);

                    var multipartRequest = new InitiateMultipartUploadRequest
                    {
                        Key        = key,
                        BucketName = _bucketName
                    };

                    FillMetadata(multipartRequest.Metadata, metadata);

                    var initiateResponse = await _client.InitiateMultipartUploadAsync(multipartRequest, _cancellationToken);

                    var partNumber = 1;
                    var partEtags  = new List<PartETag>();

                    // Upload the stream in parts of at most MinOnePartUploadSizeLimit;
                    // Position advances as each part is read from the shared stream.
                    while (stream.Position < streamLength)
                    {
                        var leftToUpload = streamLength - stream.Position;
                        var toUpload     = Math.Min(MinOnePartUploadSizeLimit.GetValue(SizeUnit.Bytes), leftToUpload);

                        var uploadResponse = await _client.UploadPartAsync(new UploadPartRequest
                        {
                            Key                    = key,
                            BucketName             = _bucketName,
                            InputStream            = stream,
                            PartNumber             = partNumber++,
                            PartSize               = toUpload,
                            UploadId               = initiateResponse.UploadId,
                            StreamTransferProgress = (_, args) =>
                            {
                                _progress?.UploadProgress.ChangeState(UploadState.Uploading);
                                _progress?.UploadProgress.UpdateUploaded(args.IncrementTransferred);
                                _progress?.OnUploadProgress?.Invoke();
                            }
                        }, _cancellationToken);

                        partEtags.Add(new PartETag(uploadResponse.PartNumber, uploadResponse.ETag));
                    }

                    await _client.CompleteMultipartUploadAsync(
                        new CompleteMultipartUploadRequest { UploadId = initiateResponse.UploadId, BucketName = _bucketName, Key = key, PartETags = partEtags },
                        _cancellationToken);

                    return;
                }

                var request = new PutObjectRequest
                {
                    Key                    = key,
                    BucketName             = _bucketName,
                    InputStream            = stream,
                    StreamTransferProgress = (_, args) =>
                    {
                        _progress?.UploadProgress.ChangeState(UploadState.Uploading);
                        _progress?.UploadProgress.UpdateUploaded(args.IncrementTransferred);
                        _progress?.OnUploadProgress?.Invoke();
                    }
                };

                FillMetadata(request.Metadata, metadata);

                await _client.PutObjectAsync(request, _cancellationToken);
            }
            catch (AmazonS3Exception e)
            {
                await MaybeHandleExceptionAsync(e);

                throw;
            }
            finally
            {
                _progress?.UploadProgress.ChangeState(UploadState.Done);
            }
        }
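The multipart branch is the standard three-step S3 protocol: InitiateMultipartUpload, a loop of UploadPart calls whose ETags are collected, and a CompleteMultipartUpload that stitches the parts together. A condensed, self-contained sketch of just that control flow (metadata, progress reporting, and error handling omitted; the 5 MB figure is S3's minimum size for every part except the last):

    using System;
    using System.Collections.Generic;
    using System.IO;
    using System.Threading;
    using System.Threading.Tasks;
    using Amazon.S3;
    using Amazon.S3.Model;

    static class S3Multipart
    {
        public static async Task UploadAsync(IAmazonS3 client, string bucket, string key,
                                             Stream stream, CancellationToken ct)
        {
            // Step 1: open the multipart upload and get its UploadId.
            var init = await client.InitiateMultipartUploadAsync(
                new InitiateMultipartUploadRequest { BucketName = bucket, Key = key }, ct);

            var partEtags = new List<PartETag>();
            const long partSize = 5L * 1024 * 1024; // S3 minimum for all parts but the last

            // Step 2: upload fixed-size parts, remembering each part's ETag.
            for (var partNumber = 1; stream.Position < stream.Length; partNumber++)
            {
                var part = await client.UploadPartAsync(new UploadPartRequest
                {
                    BucketName  = bucket,
                    Key         = key,
                    UploadId    = init.UploadId,
                    PartNumber  = partNumber,
                    PartSize    = Math.Min(partSize, stream.Length - stream.Position),
                    InputStream = stream
                }, ct);

                partEtags.Add(new PartETag(partNumber, part.ETag));
            }

            // Step 3: commit the upload by handing back the collected ETags.
            await client.CompleteMultipartUploadAsync(new CompleteMultipartUploadRequest
            {
                BucketName = bucket,
                Key        = key,
                UploadId   = init.UploadId,
                PartETags  = partEtags
            }, ct);
        }
    }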