/// <summary>
/// Wires up the actions used to write imported documents into <paramref name="database"/>.
/// </summary>
/// <param name="database">Target database the put commands are merged into.</param>
/// <param name="buildType">Build version the incoming data originates from.</param>
/// <param name="isRevision">True when the documents being written are revisions.</param>
/// <param name="log">Logger forwarded to the batched put command.</param>
public DatabaseDocumentActions(DocumentDatabase database, BuildVersionType buildType, bool isRevision, Logger log)
{
    _database = database;
    _buildType = buildType;
    _isRevision = isRevision;
    _log = log;

    // sizeof(int) == IntPtr.Size holds in a 32-bit process; together with the
    // forced 32-bit pager option this selects a much smaller batching threshold
    // (2 MB instead of 32 MB) to keep memory pressure down.
    var use32BitsLimits = sizeof(int) == IntPtr.Size || database.Configuration.Storage.ForceUsing32BitsPager;
    _enqueueThreshold = new Sparrow.Size(use32BitsLimits ? 2 : 32, SizeUnit.Megabytes);

    _command = new MergedBatchPutCommand(database, buildType, log)
    {
        IsRevision = isRevision
    };
}
/// <summary>
/// Uploads <paramref name="stream"/> to Azure Blob Storage under <paramref name="blobName"/>,
/// attaching <paramref name="metadata"/> and reporting progress via <c>_progress</c>.
/// </summary>
/// <exception cref="InvalidOperationException">Thrown when the stream exceeds the Azure size limit.</exception>
public void PutBlob(string blobName, Stream stream, Dictionary <string, string> metadata)
{
    // Fail fast on bad credentials/connectivity before streaming any data.
    AsyncHelpers.RunSync(TestConnectionAsync);

    var uploadSize = new Size(stream.Length, SizeUnit.Bytes);
    if (uploadSize > TotalBlocksSizeLimit)
    {
        throw new InvalidOperationException(@"Can't upload more than 4.75TB to Azure, " + $"current upload size: {uploadSize}");
    }

    var totalBytes = uploadSize.GetValue(SizeUnit.Bytes);
    try
    {
        _progress?.UploadProgress.SetTotal(totalBytes);

        var singleBlockLimitInBytes = MaxSingleBlockSize.GetValue(SizeUnit.Bytes);
        if (totalBytes > singleBlockLimitInBytes)
        {
            // The SDK will split the payload into blocks; surface that to the progress UI.
            _progress?.UploadProgress.ChangeType(UploadType.Chunked);
        }

        _client.GetBlobClient(blobName).Upload(
            stream,
            metadata: metadata,
            progressHandler: this,
            transferOptions: new StorageTransferOptions { MaximumTransferSize = singleBlockLimitInBytes },
            cancellationToken: _cancellationToken);
    }
    finally
    {
        // Mark Done regardless of outcome so the progress state machine terminates.
        _progress?.UploadProgress.ChangeState(UploadState.Done);
    }
}
/// <summary>
/// Spawns a long-running pool thread that uploads the local backup file
/// (<c>_settings.BackupPath</c>) to the cloud destination described by
/// <paramref name="settings"/>, tracking state/throughput in
/// <paramref name="uploadStatus"/>. No-op when the configuration says this
/// destination is disabled. The thread is registered in <c>_threads</c>.
/// </summary>
/// <param name="settings">Destination-specific backup settings; checked via CanBackupUsing.</param>
/// <param name="uploadToServer">Callback that performs the actual upload for this destination.</param>
/// <param name="uploadStatus">Mutable status object updated with progress, skip flag and errors.</param>
/// <param name="targetName">Human-readable destination name used in messages.</param>
private void CreateUploadTaskIfNeeded <S, T>(S settings, Action <S, FileStream, Progress> uploadToServer, T uploadStatus, string targetName)
    where S : BackupSettings
    where T : CloudUploadStatus
{
    if (PeriodicBackupConfiguration.CanBackupUsing(settings) == false)
    {
        return;
    }
    Debug.Assert(uploadStatus != null);
    // Captured by the thread lambda below.
    var localUploadStatus = uploadStatus;
    var thread = PoolOfThreads.GlobalRavenThreadPool.LongRunning(_ =>
    {
        try
        {
            // Upload is background work; don't compete with request-serving threads.
            Thread.CurrentThread.Priority = ThreadPriority.BelowNormal;
            NativeMemory.EnsureRegistered();
            using (localUploadStatus.UpdateStats(_isFullBackup))
            using (var fileStream = File.OpenRead(_settings.BackupPath))
            {
                var uploadProgress = localUploadStatus.UploadProgress;
                try
                {
                    localUploadStatus.Skipped = false;
                    uploadProgress.ChangeState(UploadState.PendingUpload);
                    uploadProgress.SetTotal(fileStream.Length);
                    AddInfo($"Starting the upload of backup file to {targetName}.");
                    var bytesPutsPerSec = new MeterMetric();
                    long lastUploadedInBytes = 0;
                    var totalToUpload = new Size(uploadProgress.TotalInBytes, SizeUnit.Bytes).ToString();
                    var sw = Stopwatch.StartNew();
                    var progress = new Progress(uploadProgress)
                    {
                        OnUploadProgress = () =>
                        {
                            // Throttle progress messages to at most one per second.
                            if (sw.ElapsedMilliseconds <= 1000)
                            {
                                return;
                            }
                            var totalUploadedInBytes = uploadProgress.UploadedInBytes;
                            // Feed the delta since the last report into the rate meter.
                            bytesPutsPerSec.MarkSingleThreaded(totalUploadedInBytes - lastUploadedInBytes);
                            lastUploadedInBytes = totalUploadedInBytes;
                            var uploaded = new Size(totalUploadedInBytes, SizeUnit.Bytes);
                            uploadProgress.BytesPutsPerSec = bytesPutsPerSec.MeanRate;
                            AddInfo($"Uploaded: {uploaded} / {totalToUpload}");
                            sw.Restart();
                        }
                    };
                    uploadToServer(settings, fileStream, progress);
                    AddInfo($"Total uploaded: {totalToUpload}, took: {MsToHumanReadableString(uploadProgress.UploadTimeInMs)}");
                }
                finally
                {
                    // Terminal state must be reached even when the upload throws.
                    uploadProgress.ChangeState(UploadState.Done);
                }
            }
        }
        catch (Exception e)
        {
            var extracted = e.ExtractSingleInnerException();
            var error = $"Failed to upload the backup file to {targetName}.";
            Exception exception = null;
            if (extracted is OperationCanceledException)
            {
                // shutting down or HttpClient timeout
                // A cancellation that we did not request is treated as a timeout.
                exception = TaskCancelToken.Token.IsCancellationRequested ? extracted : new TimeoutException(error, e);
            }
            // Record the failure on the status object and collect it for the caller;
            // the thread itself never rethrows.
            localUploadStatus.Exception = (exception ?? e).ToString();
            _exceptions.Add(exception ?? new InvalidOperationException(error, e));
        }
    }, null, $"Upload backup file of database '{_settings.DatabaseName}' to {targetName} (task: '{_settings.TaskName}')");
    _threads.Add(thread);
}
/// <summary>
/// Task-based variant: queues an async upload of the backup file at
/// <paramref name="backupPath"/> to the cloud destination described by
/// <paramref name="settings"/>, adding the task to <paramref name="tasks"/>.
/// No-op when the configuration says this destination is disabled.
/// Unlike the thread-based variant, failures here are rethrown so the
/// awaiting caller observes them.
/// </summary>
/// <param name="settings">Destination-specific backup settings; checked via CanBackupUsing.</param>
/// <param name="tasks">Collection the spawned upload task is added to.</param>
/// <param name="backupPath">Path of the local backup file to upload.</param>
/// <param name="isFullBackup">Whether this is a full (vs. incremental) backup; forwarded to stats.</param>
/// <param name="uploadToServer">Async callback that performs the actual upload for this destination.</param>
/// <param name="uploadStatus">Mutable status object updated with progress, skip flag and errors.</param>
/// <param name="onProgress">Sink for human-readable progress messages.</param>
private void CreateUploadTaskIfNeeded <S, T>(
    S settings,
    List <Task> tasks,
    string backupPath,
    bool isFullBackup,
    Func <S, FileStream, Progress, Task> uploadToServer,
    T uploadStatus,
    Action <IOperationProgress> onProgress)
    where S : BackupSettings
    where T : CloudUploadStatus
{
    if (PeriodicBackupConfiguration.CanBackupUsing(settings) == false)
    {
        return;
    }
    Debug.Assert(uploadStatus != null);
    // Captured by the task lambda below.
    var localUploadStatus = uploadStatus;
    tasks.Add(Task.Run(async() =>
    {
        using (localUploadStatus.UpdateStats(isFullBackup))
        using (var fileStream = File.OpenRead(backupPath))
        {
            var uploadProgress = localUploadStatus.UploadProgress;
            localUploadStatus.Skipped = false;
            uploadProgress.ChangeState(UploadState.PendingUpload);
            uploadProgress.SetTotal(fileStream.Length);
            AddInfo($"Starting {uploadStatus.GetType().AssemblyQualifiedName}", onProgress);
            try
            {
                var bytesPutsPerSec = new MeterMetric();
                long lastUploadedInBytes = 0;
                var totalToUpload = new Sparrow.Size(uploadProgress.TotalInBytes, SizeUnit.Bytes).ToString();
                var sw = Stopwatch.StartNew();
                var progress = new Progress(uploadProgress)
                {
                    OnUploadProgress = () =>
                    {
                        // Throttle progress messages to at most one per second.
                        if (sw.ElapsedMilliseconds <= 1000)
                        {
                            return;
                        }
                        var totalUploadedInBytes = uploadProgress.UploadedInBytes;
                        // Feed the delta since the last report into the rate meter.
                        bytesPutsPerSec.MarkSingleThreaded(totalUploadedInBytes - lastUploadedInBytes);
                        lastUploadedInBytes = totalUploadedInBytes;
                        var uploaded = new Sparrow.Size(totalUploadedInBytes, SizeUnit.Bytes);
                        uploadProgress.BytesPutsPerSec = bytesPutsPerSec.MeanRate;
                        AddInfo($"Uploaded: {uploaded} / {totalToUpload}", onProgress);
                        sw.Restart();
                    }
                };
                await uploadToServer(settings, fileStream, progress);
                AddInfo($"Total uploaded: {totalToUpload}, " + $"took: {MsToHumanReadableString(uploadProgress.UploadTimeInMs)}", onProgress);
            }
            catch (OperationCanceledException e)
            {
                // shutting down
                // Record the cancellation but propagate it unchanged.
                localUploadStatus.Exception = e.ToString();
                throw;
            }
            catch (Exception e)
            {
                // Record and wrap any other failure so the caller sees which destination failed.
                localUploadStatus.Exception = e.ToString();
                throw new InvalidOperationException($"Failed to backup to {uploadStatus.GetType().FullName}", e);
            }
            finally
            {
                // Terminal state must be reached even when the upload throws.
                uploadProgress.ChangeState(UploadState.Done);
            }
        }
    }));
}
/// <summary>
/// Uploads <paramref name="stream"/> to the S3 bucket under <paramref name="key"/> with
/// <paramref name="metadata"/>, reporting progress via <c>_progress</c>. Streams larger than
/// <c>MaxUploadPutObject</c> go through the multipart upload API; smaller ones use a single PutObject.
/// </summary>
/// <exception cref="InvalidOperationException">Thrown when the stream exceeds the S3 5TB limit.</exception>
public async Task PutObjectAsync(string key, Stream stream, Dictionary <string, string> metadata)
{
    //TestConnection();
    var streamSize = new Size(stream.Length, SizeUnit.Bytes);
    if (streamSize > TotalBlocksSizeLimit)
    {
        throw new InvalidOperationException($@"Can't upload more than 5TB to AWS S3, current upload size: {streamSize}");
    }
    var streamLength = streamSize.GetValue(SizeUnit.Bytes);
    try
    {
        _progress?.UploadProgress.SetTotal(streamLength);
        if (streamSize > MaxUploadPutObject)
        {
            await MultiPartUploadAsync(key, stream, metadata, streamLength);
            return;
        }
        var request = new PutObjectRequest
        {
            Key = key,
            BucketName = _bucketName,
            InputStream = stream,
            StreamTransferProgress = (_, args) =>
            {
                _progress?.UploadProgress.ChangeState(UploadState.Uploading);
                _progress?.UploadProgress.UpdateUploaded(args.IncrementTransferred);
                _progress?.OnUploadProgress?.Invoke();
            }
        };
        FillMetadata(request.Metadata, metadata);
        await _client.PutObjectAsync(request, _cancellationToken);
    }
    catch (AmazonS3Exception e)
    {
        await MaybeHandleExceptionAsync(e);
        throw;
    }
    finally
    {
        // Mark Done regardless of outcome so the progress state machine terminates.
        _progress?.UploadProgress.ChangeState(UploadState.Done);
    }
}

/// <summary>
/// Performs a chunked (multipart) upload of <paramref name="stream"/> to <paramref name="key"/>.
/// On any failure the multipart upload is aborted so S3 discards the already-uploaded parts;
/// otherwise the incomplete parts remain on S3 and accrue storage charges until manually aborted.
/// </summary>
private async Task MultiPartUploadAsync(string key, Stream stream, Dictionary <string, string> metadata, long streamLength)
{
    _progress?.UploadProgress.ChangeType(UploadType.Chunked);
    var multipartRequest = new InitiateMultipartUploadRequest { Key = key, BucketName = _bucketName };
    FillMetadata(multipartRequest.Metadata, metadata);
    var initiateResponse = await _client.InitiateMultipartUploadAsync(multipartRequest, _cancellationToken);
    try
    {
        var partNumber = 1;
        var partEtags = new List <PartETag>();
        while (stream.Position < streamLength)
        {
            var leftToUpload = streamLength - stream.Position;
            var toUpload = Math.Min(MinOnePartUploadSizeLimit.GetValue(SizeUnit.Bytes), leftToUpload);
            var uploadResponse = await _client
                .UploadPartAsync(new UploadPartRequest
                {
                    Key = key,
                    BucketName = _bucketName,
                    InputStream = stream,
                    PartNumber = partNumber++,
                    PartSize = toUpload,
                    UploadId = initiateResponse.UploadId,
                    StreamTransferProgress = (_, args) =>
                    {
                        _progress?.UploadProgress.ChangeState(UploadState.Uploading);
                        _progress?.UploadProgress.UpdateUploaded(args.IncrementTransferred);
                        _progress?.OnUploadProgress?.Invoke();
                    }
                }, _cancellationToken);
            partEtags.Add(new PartETag(uploadResponse.PartNumber, uploadResponse.ETag));
        }
        await _client.CompleteMultipartUploadAsync(
            new CompleteMultipartUploadRequest { UploadId = initiateResponse.UploadId, BucketName = _bucketName, Key = key, PartETags = partEtags },
            _cancellationToken);
    }
    catch
    {
        try
        {
            // Best-effort abort; use CancellationToken.None so the abort still runs
            // when the failure itself was a cancellation of _cancellationToken.
            await _client.AbortMultipartUploadAsync(new AbortMultipartUploadRequest
            {
                UploadId = initiateResponse.UploadId,
                BucketName = _bucketName,
                Key = key
            }, CancellationToken.None);
        }
        catch
        {
            // Swallow abort failures - the original upload exception is the one that matters.
        }
        throw;
    }
}