/// <summary>
/// Uploads a blob; resumes from where it left off if a previous upload was already in progress.
/// </summary>
/// <param name="uploadUrl">Blob upload URL.</param>
/// <param name="blobStream">Blob stream to be uploaded; must support Length and ReadAsync. Seek must be available for resumed uploads.</param>
/// <param name="state">Caller-supplied state object, passed through to the upload context.</param>
/// <param name="cancellationToken">Cancellation token to stop the asynchronous action.</param>
/// <returns>Returns true if the upload is complete; false otherwise.</returns>
public async Task<bool> Upload(Uri uploadUrl, Stream blobStream, object state, CancellationToken cancellationToken = default)
{
    // Ask the server how much has already been uploaded (tus HEAD request)
    var headResult = await _tusCore.Head(uploadUrl, cancellationToken);
    long offset = long.Parse(headResult["Upload-Offset"]);
    long length = blobStream.Length;

    var tusUploadFileContext = new TusUploadContext(length, offset, uploadUrl, state);

    while (!cancellationToken.IsCancellationRequested)
    {
        if (offset == length)
        {
            UploadFinish?.Invoke(this, tusUploadFileContext);
            return true;
        }

        if (blobStream.Position != offset)
        {
            blobStream.Seek(offset, SeekOrigin.Begin);
        }

        int chunkSize = _tusClientOptions.GetChunkUploadSize(this, tusUploadFileContext);
        chunkSize = (int)Math.Min(chunkSize, length - offset);

        byte[] buffer = new byte[chunkSize];
        int readCount = await blobStream.ReadAsync(buffer, 0, chunkSize, cancellationToken);
        if (readCount != chunkSize)
        {
            // ReadAsync may return fewer bytes than requested; only PATCH what was actually read
            Array.Resize(ref buffer, readCount);
        }

        var uploadResult = await _tusCore.Patch(uploadUrl, buffer, offset, cancellationToken);
        offset = long.Parse(uploadResult["Upload-Offset"]);

        tusUploadFileContext.UploadedSize = offset;
        UploadProgress?.Invoke(this, tusUploadFileContext);
    }

    return false;
}
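A minimal usage sketch for the method above. The `UploadProgress`/`UploadFinish` events and the `UploadedSize` property come from the snippet; the `TusClient` class name, the file path, and the pre-created `uploadUrl` are assumptions for illustration only.

// Sketch only: assumes the enclosing class is instantiable as TusClient and
// that uploadUrl was obtained from a prior tus creation request.
var client = new TusClient();
client.UploadProgress += (sender, ctx) => Console.WriteLine($"Uploaded {ctx.UploadedSize} bytes");
client.UploadFinish += (sender, ctx) => Console.WriteLine("Upload complete");

using (var fileStream = File.OpenRead("backup.zip"))
{
    bool completed = await client.Upload(uploadUrl, fileStream, state: null);
}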
public RavenFtpClient(string url, int? port, string userName, string password, string certificateAsBase64,
    string certificateFileName, UploadProgress uploadProgress = null, CancellationToken? cancellationToken = null)
    : base(uploadProgress, cancellationToken)
{
    _url = url;
    _port = port;
    _userName = userName;
    _password = password;
    _certificateAsBase64 = certificateAsBase64;
    _certificateFileName = certificateFileName;

    if (_url.StartsWith("ftp://", StringComparison.OrdinalIgnoreCase) == false &&
        _url.StartsWith("ftps://", StringComparison.OrdinalIgnoreCase) == false)
    {
        _url = "ftp://" + url;
    }

    if (_url.StartsWith("ftps", StringComparison.OrdinalIgnoreCase))
    {
        _useSsl = true;
        _url = _url.Replace("ftps://", "ftp://", StringComparison.OrdinalIgnoreCase);
    }

    if (_url.EndsWith("/") == false)
    {
        _url += "/";
    }

    Debug.Assert(_url.StartsWith("ftp://", StringComparison.OrdinalIgnoreCase));
}
private void UploadData(byte[] data)
{
    SerialPort.ReadTimeout = 2000;
    try
    {
        Message?.Invoke(this, "Starting upload");
        for (var i = 0; i < Math.Ceiling((double)data.Length / 4096d); i++)
        {
            // Send the data in 4096-byte blocks
            SerialPort.Write(data, i * 4096, Math.Min(4096, data.Length - i * 4096));
            SerialPort.BaseStream.Flush();

            // The device acknowledges each block with the byte 0x05
            int d = 0;
            try
            {
                d = SerialPort.ReadByte();
            }
            catch
            {
            }

            if (d != 5)
            {
                Message?.Invoke(this, "Upload error");
                return;
            }

            UploadProgress?.Invoke(this, (double)i / Math.Ceiling((double)data.Length / 4096d));
        }
    }
    catch
    {
        Message?.Invoke(this, "Upload error");
        return;
    }

    Message?.Invoke(this, "Upload successful");
}
public void DataLakeUploader_FreshUpload()
{
    var frontEnd = new InMemoryFrontEnd();
    var up = CreateParameters(isResume: false);

    UploadProgress progress = null;
    var syncRoot = new object();
    IProgress<UploadProgress> progressTracker = new Progress<UploadProgress>(
        (p) =>
        {
            lock (syncRoot)
            {
                // Reports can come out of order because of race conditions (multiple threads
                // reporting at the same time); only update if we are actually making progress
                if (progress == null || progress.UploadedByteCount < p.UploadedByteCount)
                {
                    progress = p;
                }
            }
        });

    var uploader = new DataLakeStoreUploader(up, frontEnd, progressTracker);
    uploader.Execute();

    VerifyFileUploadedSuccessfully(up, frontEnd);
    VerifyProgressStatus(progress, _largeFileData.Length);
}
public async Task UploadFile(string folderName, string fileName, Stream stream)
{
    await TestConnection();

    UploadProgress?.SetTotal(stream.Length);
    UploadProgress?.ChangeState(UploadState.PendingUpload);

    var url = await CreateNestedFoldersIfNeeded(folderName);
    url += $"/{fileName}";

    var request = CreateFtpWebRequest(url, WebRequestMethods.Ftp.UploadFile, keepAlive: true);
    var readBuffer = new byte[DefaultBufferSize];

    int count;
    using (var requestStream = await request.GetRequestStreamAsync())
    {
        while ((count = await stream.ReadAsync(readBuffer, 0, readBuffer.Length)) != 0)
        {
            await requestStream.WriteAsync(readBuffer, 0, count);
            UploadProgress?.UpdateUploaded(count);
        }

        await requestStream.FlushAsync();
    }

    UploadProgress?.ChangeState(UploadState.PendingResponse);
    var response = await request.GetResponseAsync();
    response.Dispose();

    UploadProgress?.ChangeState(UploadState.Done);
}
internal bool PublishBundleAndCleanUp(string resourceFolder, UploadComplete complete, UploadProgress progress, Stack<RequestHandle> requests)
{
    if (string.IsNullOrEmpty(m_Guid))
    {
        return false;
    }

    m_UploadCompleteCallback = complete;
    m_UploadProgressCallback = progress;

    switch (m_Resource.type)
    {
        case ResourceType.Scene:
            requests.Push(m_StorageUser.UploadSceneAssetBundle(resourceFolder, m_Platform.ToString(), m_Guid, OnComplete, OnUploadProgress));
            break;
        case ResourceType.Prefab:
            requests.Push(m_StorageUser.UploadPrefabAssetBundle(resourceFolder, m_Platform.ToString(), m_Guid, OnComplete, OnUploadProgress));
            break;
        default:
            Debug.LogWarning($"Trying to upload asset bundle {m_Resource.name} of unsupported type {m_Resource.type}; only Scene and Prefab bundles can be uploaded");
            // No upload request was pushed, so report failure rather than success
            return false;
    }

    return true;
}
private void SetIsUploading(bool uploading)
{
    LocalParams.InvokeIfRequired(c => c.Enabled = !uploading);
    RemoteParams.InvokeIfRequired(c => c.Enabled = !uploading);
    DoUpload.InvokeIfRequired(c => c.Enabled = !uploading);
    UploadProgress.InvokeIfRequired(c => c.Visible = uploading);
}
private void beginRequestOutput()
{
    try
    {
        using (Stream requestStream = request.GetRequestStream())
        {
            reportForwardProgress();

            requestBody.Position = 0;

            byte[] buff = new byte[buffer_size];
            int read;
            int totalRead = 0;

            while ((read = requestBody.Read(buff, 0, buffer_size)) > 0)
            {
                reportForwardProgress();
                requestStream.Write(buff, 0, read);
                requestStream.Flush();

                totalRead += read;
                UploadProgress?.Invoke(this, totalRead, request.ContentLength);
            }
        }

        beginResponse();
    }
    catch (Exception e)
    {
        Complete(e);
    }
}
private async Task UploadToS3(
    S3Settings settings,
    Stream stream,
    string folderName,
    string fileName,
    UploadProgress uploadProgress,
    string archiveDescription)
{
    using (var client = new RavenAwsS3Client(settings.AwsAccessKey, settings.AwsSecretKey,
        settings.AwsRegionName, settings.BucketName, uploadProgress, _cancellationToken.Token))
    {
        var key = CombinePathAndKey(settings.RemoteFolderName, folderName, fileName);
        await client.PutObject(key, stream, new Dictionary<string, string>
        {
            { "Description", archiveDescription }
        });

        if (_logger.IsInfoEnabled)
        {
            // string.Format around an interpolated string was redundant; the interpolation suffices
            _logger.Info($"Successfully uploaded backup file '{fileName}' " +
                         $"to S3 bucket named: {settings.BucketName}, " +
                         $"with key: {key}");
        }
    }
}
public void SetProgress(string requestId, UploadProgress progress)
{
    lock (this)
    {
        UploadProgresses[requestId] = progress;
    }
}
public IActionResult TrackerApi(FileIdentifier trackerId)
{
    if (!this.ModelState.IsValid)
    {
        return this.BadRequest();
    }

    UploadProgress current = this._uploadProgressManager.GetProgress(trackerId);
    if (current == null)
    {
        this._logger.LogWarning(LogEvents.UploadNotFound, "Unable to find upload by id {0}", trackerId);
        return this.NotFound("Cannot find upload with id: " + trackerId);
    }

    UploadProgressModel model = new UploadProgressModel
    {
        Current = current.Current,
        Total = current.Total,
        FileName = current.FileName,
        Percent = (int)Math.Round(((double)current.Current / current.Total) * 100),
        Performance = current.Current.Bytes().Per(DateTime.UtcNow - current.StartTime).Humanize("#.##")
    };

    return this.Ok(model);
}
public void DoSomeUpload()
{
    if (UploadProgress != null)
    {
        UploadEventArgs e = new UploadEventArgs();
        e.BytesSoFar = 123f;
        e.TotalToUpload = 100000f;
        UploadProgress.Invoke(this, e);
    }
}
private static void UploadProgressChanged(object sender, UploadProgress e)
{
    if (e.UploadedByteCount == 0)
    {
        return;
    }

    var percent = (double)e.UploadedByteCount / e.TotalFileLength * 100.0;
    Console.WriteLine($"{percent:0.##}%, {e.UploadedByteCount}/{e.TotalFileLength} bytes, {e.TotalSegmentCount} segment(s)");
}
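To feed this handler from the `DataLakeStoreUploader` shown in the other snippets, it can be wrapped in an `IProgress<UploadProgress>`; a hedged sketch, reusing the `up` and `frontEnd` variables from the test snippets above (the adapter shape is illustrative, not part of the original code):

// Illustrative adapter: Progress<T> invokes its callback with each reported
// UploadProgress, which we forward to the console handler above.
IProgress<UploadProgress> tracker = new Progress<UploadProgress>(
    p => UploadProgressChanged(null, p));
var uploader = new DataLakeStoreUploader(up, frontEnd, tracker);
uploader.Execute();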
public void DataLakeUploader_CancelUpload()
{
    CancellationTokenSource myTokenSource = new CancellationTokenSource();
    var cancelToken = myTokenSource.Token;

    var frontEnd = new InMemoryFrontEnd();
    var mockedFrontend = new MockableFrontEnd(frontEnd);
    mockedFrontend.GetStreamLengthImplementation = (streamPath, isDownload) =>
    {
        // Sleep for 2 seconds to allow the cancellation to actually happen
        Thread.Sleep(2000);
        return frontEnd.GetStreamLength(streamPath, isDownload);
    };
    mockedFrontend.StreamExistsImplementation = (streamPath, isDownload) =>
    {
        // Sleep for 2 seconds to allow the cancellation to actually happen
        Thread.Sleep(2000);
        return frontEnd.StreamExists(streamPath, isDownload);
    };

    var up = CreateParameters(isResume: false);

    UploadProgress progress = null;
    var syncRoot = new object();
    IProgress<UploadProgress> progressTracker = new Progress<UploadProgress>(
        (p) =>
        {
            lock (syncRoot)
            {
                // Reports can come out of order because of race conditions (multiple threads
                // reporting at the same time); only update if we are actually making progress
                if (progress == null || progress.UploadedByteCount < p.UploadedByteCount)
                {
                    progress = p;
                }
            }
        });

    var uploader = new DataLakeStoreUploader(up, mockedFrontend, cancelToken, progressTracker);
    Task uploadTask = Task.Run(() =>
    {
        uploader.Execute();
        Thread.Sleep(2000);
    }, cancelToken);

    myTokenSource.Cancel();
    Assert.True(cancelToken.IsCancellationRequested);

    while (uploadTask.Status == TaskStatus.Running || uploadTask.Status == TaskStatus.WaitingToRun)
    {
        Thread.Sleep(250);
    }

    // Verify that the file did not get uploaded completely.
    Assert.False(frontEnd.StreamExists(up.TargetStreamPath), "Uploaded stream exists when it should not yet have been completely created");
}
private async Task<string> ProcessUrlAsync(File file, string pathDisk)
{
    _listFileUploadStates.Add(new FileState { name = file.Name, state = "Uploading 0 %" });
    Console.WriteLine($"{file.Name} - Uploading 0 %");

    var progress = new UploadProgress(UpdateProgress);
    await UploadAsync(file.Path, $"{pathDisk}/{file.Name}", progress);

    return file.Name;
}
private async Task<string> MultiPartUpload(string archiveDescription, Stream stream)
{
    var streamLength = stream.Length;
    if (streamLength > MultiPartUploadLimitInBytes)
    {
        throw new InvalidOperationException(@"Can't upload more than 40TB to Amazon Glacier, " +
                                            $"current upload size: {new Size(streamLength).HumaneSize}");
    }

    UploadProgress?.SetTotal(streamLength);
    UploadProgress?.ChangeType(UploadType.Chunked);

    // Using a chunked upload we can upload up to 10,000 chunks, 4 GB max each;
    // we limit every chunk to a minimum of 128 MB.
    // Constraint: the part size must be a megabyte (1024 KB) multiplied by a power of 2,
    // for example 1,048,576 (1 MB), 2,097,152 (2 MB), 4,194,304 (4 MB), 8,388,608 (8 MB), and so on.
    // The minimum allowable part size is 1 MB and the maximum is 4 GB (4096 MB).
    var maxLengthPerPart = Math.Max(MinOnePartUploadSizeLimitInBytes, stream.Length / 10000);
    const long maxPartLength = 4L * 1024 * 1024 * 1024; // 4 GB
    var lengthPerPartPowerOf2 = Math.Min(GetNextPowerOf2(maxLengthPerPart), maxPartLength);

    var baseUrl = $"{GetUrl()}/multipart-uploads";
    var uploadId = await GetUploadId(baseUrl, archiveDescription, lengthPerPartPowerOf2);
    var client = GetClient(TimeSpan.FromDays(7));
    var uploadUrl = $"{baseUrl}/{uploadId}";

    var fullStreamPayloadTreeHash = RavenAwsHelper.CalculatePayloadTreeHash(stream);

    try
    {
        while (stream.Position < streamLength)
        {
            var length = Math.Min(lengthPerPartPowerOf2, streamLength - stream.Position);
            await UploadPart(stream, client, uploadUrl, length, retryCount: 0);
        }

        return await CompleteMultiUpload(uploadUrl, client, streamLength, fullStreamPayloadTreeHash);
    }
    catch (Exception)
    {
        await AbortMultiUpload(uploadUrl, client);
        throw;
    }
    finally
    {
        UploadProgress?.ChangeState(UploadState.Done);
    }
}
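`GetNextPowerOf2` is referenced above but not shown. A minimal sketch of what such a helper plausibly does, rounding the computed part length up to the next power of two so it satisfies the Glacier part-size constraint described in the comments (the actual implementation in the source may differ):

// Hypothetical helper, assumed behavior: round length up to the nearest power of two.
private static long GetNextPowerOf2(long length)
{
    long result = 1;
    while (result < length)
        result <<= 1; // double until we reach or exceed the requested length
    return result;
}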
private UploadProgress GetProgressObject(FileIdentifier id)
{
    var progressObject = this._uploadProgressManager.GetProgress(id);

    if (progressObject == null)
    {
        progressObject = new UploadProgress();
        Debug.Fail("Unable to retrieve progress object - which should have been set by the handler.");
    }

    return progressObject;
}
public async Task PutObject(string key, Stream stream, Dictionary<string, string> metadata)
{
    await TestConnection();

    if (stream.Length > MaxUploadPutObjectSizeInBytes)
    {
        // For objects over 256 MB
        await MultiPartUpload(key, stream, metadata);
        return;
    }

    var url = $"{GetUrl()}/{key}";
    var now = SystemTime.UtcNow;
    var payloadHash = RavenAwsHelper.CalculatePayloadHash(stream);

    UploadProgress?.SetTotal(stream.Length);

    // Stream is disposed by the HttpClient
    var content = new ProgressableStreamContent(stream, UploadProgress)
    {
        Headers =
        {
            { "x-amz-date", RavenAwsHelper.ConvertToString(now) },
            { "x-amz-content-sha256", payloadHash }
        }
    };

    foreach (var metadataKey in metadata.Keys)
    {
        content.Headers.Add("x-amz-meta-" + metadataKey.ToLower(), metadata[metadataKey]);
    }

    var headers = ConvertToHeaders(content.Headers);

    var client = GetClient(TimeSpan.FromHours(24));
    var authorizationHeaderValue = CalculateAuthorizationHeaderValue(HttpMethods.Put, url, now, headers);
    client.DefaultRequestHeaders.Authorization = authorizationHeaderValue;

    var response = await client.PutAsync(url, content, CancellationToken);
    UploadProgress?.ChangeState(UploadState.Done);
    if (response.IsSuccessStatusCode)
    {
        return;
    }

    throw StorageException.FromResponseMessage(response);
}
private async Task MultiPartUpload(string key, Stream stream, Dictionary<string, string> metadata)
{
    var streamLength = stream.Length;
    if (streamLength > MultiPartUploadLimitInBytes)
    {
        throw new InvalidOperationException(@"Can't upload more than 5TB to Amazon S3, " +
                                            $"current upload size: {new Size(streamLength).HumaneSize}");
    }

    UploadProgress?.SetTotal(streamLength);
    UploadProgress?.ChangeType(UploadType.Chunked);

    var baseUrl = $"{GetUrl()}/{key}";
    var uploadId = await GetUploadId(baseUrl, metadata);
    var client = GetClient(TimeSpan.FromDays(7));
    var partNumbersWithEtag = new List<Tuple<int, string>>();
    var partNumber = 0;
    var completeUploadUrl = $"{baseUrl}?uploadId={uploadId}";

    // Using a chunked upload we can upload up to 1,000 chunks, 5 GB max each;
    // we limit every chunk to a minimum of 100 MB
    var maxLengthPerPart = Math.Max(MinOnePartUploadSizeLimitInBytes, stream.Length / 1000);

    try
    {
        while (stream.Position < streamLength)
        {
            var length = Math.Min(maxLengthPerPart, streamLength - stream.Position);
            var url = $"{baseUrl}?partNumber={++partNumber}&uploadId={uploadId}";

            var etag = await UploadPart(stream, client, url, length, retryCount: 0);
            partNumbersWithEtag.Add(new Tuple<int, string>(partNumber, etag));
        }

        await CompleteMultiUpload(completeUploadUrl, client, partNumbersWithEtag);
    }
    catch (Exception)
    {
        await AbortMultiUpload(client, completeUploadUrl);
        throw;
    }
    finally
    {
        UploadProgress?.ChangeState(UploadState.Done);
    }
}
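For a sense of the part-size arithmetic above: a 1 TB stream gives stream.Length / 1000 of about 1 GB, which is above the 100 MB floor, so the upload runs in roughly 1,000 parts of about 1 GB each; a 50 GB stream gives 50 MB, so the 100 MB minimum kicks in and the upload uses roughly 500 parts of 100 MB.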
public void UploadFile(Stream bodyStream, string relativePath, int bufferSize = RECOMMENDED_BUFFER_SIZE)
{
    //var request = NewRequest(WebRequestMethods.Ftp.UploadFile);
    var request = NewRequest(relativePath, FtpVerb.STOR);
    request.ContentLength = bodyStream.Length;

    using (var stream = request.GetRequestStream())
    {
        bodyStream.ScanAndWriteTo(stream, bufferSize, (writeTarget, buffer, totalWrittenLength) =>
        {
            UploadProgress?.Invoke(this, totalWrittenLength, bodyStream.Length);
        });
    }
}
public async Task<string> UploadArchive(Stream stream, string archiveDescription)
{
    await TestConnection();

    if (stream.Length > MaxUploadArchiveSizeInBytes)
    {
        // For objects over 256 MB
        return await MultiPartUpload(archiveDescription, stream);
    }

    var url = $"{GetUrl()}/archives";

    var now = SystemTime.UtcNow;
    var payloadHash = RavenAwsHelper.CalculatePayloadHash(stream);
    var payloadTreeHash = RavenAwsHelper.CalculatePayloadTreeHash(stream);

    UploadProgress?.SetTotal(stream.Length);

    // Stream is disposed by the HttpClient
    var content = new ProgressableStreamContent(stream, UploadProgress)
    {
        Headers =
        {
            { "x-amz-glacier-version", "2012-06-01" },
            { "x-amz-date", RavenAwsHelper.ConvertToString(now) },
            { "x-amz-content-sha256", payloadHash },
            { "x-amz-sha256-tree-hash", payloadTreeHash },
            { "x-amz-archive-description", archiveDescription },
            { "Content-Length", stream.Length.ToString(CultureInfo.InvariantCulture) }
        }
    };

    var headers = ConvertToHeaders(content.Headers);

    var client = GetClient(TimeSpan.FromHours(24));
    var authorizationHeaderValue = CalculateAuthorizationHeaderValue(HttpMethods.Post, url, now, headers);
    client.DefaultRequestHeaders.Authorization = authorizationHeaderValue;

    var response = await client.PostAsync(url, content, CancellationToken);
    UploadProgress?.ChangeState(UploadState.Done);
    if (response.IsSuccessStatusCode == false)
    {
        throw StorageException.FromResponseMessage(response);
    }

    return ReadArchiveId(response);
}
protected RavenAwsClient(string awsAccessKey, string awsSecretKey, string awsRegionName,
    UploadProgress uploadProgress, CancellationToken? cancellationToken = null)
    : base(uploadProgress, cancellationToken)
{
    if (string.IsNullOrWhiteSpace(awsRegionName))
    {
        throw new ArgumentException("AWS region cannot be null or empty!");
    }

    awsRegionName = awsRegionName.ToLower();

    _awsAccessKey = awsAccessKey;
    _awsSecretKey = Encoding.UTF8.GetBytes("AWS4" + awsSecretKey);

    AwsRegion = awsRegionName;
}
public async Task PutBlob(string key, Stream stream, Dictionary<string, string> metadata)
{
    await TestConnection();

    if (stream.Length > MaxUploadPutBlobInBytes)
    {
        // For blobs over 256 MB
        await PutBlockApi(key, stream, metadata);
        return;
    }

    var url = _serverUrlForContainer + "/" + key;

    UploadProgress?.SetTotal(stream.Length);

    var now = SystemTime.UtcNow;

    // Stream is disposed by the HttpClient
    var content = new ProgressableStreamContent(stream, UploadProgress)
    {
        Headers =
        {
            { "x-ms-date", now.ToString("R") },
            { "x-ms-version", AzureStorageVersion },
            { "x-ms-blob-type", "BlockBlob" },
            { "Content-Length", stream.Length.ToString(CultureInfo.InvariantCulture) }
        }
    };

    foreach (var metadataKey in metadata.Keys)
    {
        content.Headers.Add("x-ms-meta-" + metadataKey.ToLower(), metadata[metadataKey]);
    }

    var client = GetClient(TimeSpan.FromHours(3));
    client.DefaultRequestHeaders.Authorization = CalculateAuthorizationHeaderValue(HttpMethods.Put, url, content.Headers);

    var response = await client.PutAsync(url, content, CancellationToken);
    UploadProgress?.ChangeState(UploadState.Done);
    if (response.IsSuccessStatusCode)
    {
        return;
    }

    throw StorageException.FromResponseMessage(response);
}
public void OnUploadProgress(DropZoneFileUploadProgress file)
{
    if (file is null)
    {
        throw new ArgumentNullException(nameof(file));
    }

    if (file.Path is null)
    {
        throw new ArgumentException("file's Path property should not be null.", nameof(file));
    }

    if (file.Name is null)
    {
        throw new ArgumentException("file's Name property should not be null.", nameof(file));
    }

    UploadProgress.InvokeAsync(new DropZoneUploadProgressEventArgs(file.Path, file.Name, file.Size, file.Key, file.SessionId, file.Progress));
}
public async Task UploadAsync(string pathFile, string pathSave, UploadProgress progress, CancellationToken cancelToken = default)
{
    // Open the local file for reading; dispose it once the upload completes
    // (the original left the FileStream undisposed)
    using var stream = new FileStream(pathFile, FileMode.Open, FileAccess.Read, FileShare.ReadWrite);

    var uploadLink = await GetUploadLinkAsync(pathSave);
    var fileName = Path.GetFileName(pathFile);

    var streamContent = new YandexStreamContent(stream);
    streamContent.ProgressChanged += (bytes, currBytes, totalBytes) => progress.UpdateProgress(currBytes, totalBytes, fileName);

    var content = new MultipartFormDataContent
    {
        { streamContent, "file", fileName }
    };

    using var yandHttpClient = new YandexHttpClient(_yandUploader.Token);
    await yandHttpClient.PostAsync(uploadLink.Href, content, cancelToken);
}
public void Upload(string comPort, byte[] data, int uploadBaudrate = 115200)
{
    UploadProgress?.Invoke(this, 0);
    this.SerialPort = new SerialPort(comPort);

    try
    {
        try
        {
            this.SerialPort.Open();
        }
        catch (Exception)
        {
            Message?.Invoke(this, "Unable to open Serial Port " + comPort);
            return;
        }

        NextionInfos model = null;
        try
        {
            model = DetectNextion();
        }
        catch
        {
        }

        if (model != null)
        {
            Message?.Invoke(this, "Nextion found " + model.Model);
            SwitchToUploadMode(data, uploadBaudrate);
            UploadData(data);
        }
        else
        {
            Message?.Invoke(this, "Nextion not found");
        }
    }
    finally
    {
        try
        {
            SerialPort.Close();
        }
        catch
        {
        }
    }
}
private UploadStatus UploadData(byte[] data)
{
    SerialPort.ReadTimeout = 2000;
    SerialPort.WriteTimeout = 5000;

    try
    {
        if (CancelRequested)
        {
            return UploadStatus.Cancel;
        }

        Message?.Invoke(this, "Starting upload");
        for (var i = 0; i < Math.Ceiling((double)data.Length / 4096d); i++)
        {
            if (CancelRequested)
            {
                return UploadStatus.Cancel;
            }

            // Send the data in 4096-byte blocks
            SerialPort.Write(data, i * 4096, Math.Min(4096, data.Length - i * 4096));
            SerialPort.BaseStream.Flush();

            // The device acknowledges each block with the byte 0x05
            int d = 0;
            try
            {
                d = SerialPort.ReadByte();
            }
            catch
            {
            }

            if (d != 5)
            {
                Message?.Invoke(this, "Upload error");
                return UploadStatus.Error;
            }

            UploadProgress?.Invoke(this, (double)i / Math.Ceiling((double)data.Length / 4096d));
        }
    }
    catch
    {
        Message?.Invoke(this, "Upload error");
        return UploadStatus.Error;
    }

    Message?.Invoke(this, "Upload finished");
    return UploadStatus.Ok;
}
public async Task Backup(CancellationToken cancellationToken)
{
    var progress = new UploadProgress(_logger);

    var backupLocations = _backupProvider.BackupDirectories();
    _logger.Trace("Directories being backed up: {0}", string.Join(", ", backupLocations.Select(file => file.FilePath)));

    var compressedLocations = await _compressionProvider.CompressAsync(backupLocations, progress, cancellationToken).ConfigureAwait(false);
    _logger.Trace("Compressed backups: {0}", string.Join(", ", compressedLocations.Select(file => file.FileInfo.FullName)));

    var encryptedLocations = await _cryptoProvider.Encrypt(compressedLocations, cancellationToken).ConfigureAwait(false);
    _logger.Trace("Encrypted backups: {0}", string.Join(", ", encryptedLocations.Select(file => file.FileInfo.FullName)));

    var uploadedLocations = await _uploadProvider.Upload(encryptedLocations, progress, cancellationToken).ConfigureAwait(false);
    _logger.Info("Files backed up: {0}", string.Join(", ", uploadedLocations.Select(file => $"ArchiveId({file.ArchiveId})|Description({file.BackupDescription})")));

    await CleanupFiles(compressedLocations.Concat(encryptedLocations), cancellationToken).ConfigureAwait(false);
}
public async Task ExecuteAsync(HttpContext context)
{
    FileIdentifier identifier = FileIdentifier.FromString(context.GetRouteValue("fileIdentifier")?.ToString() ?? throw new InvalidOperationException("No ID"));

    this._logger.LogInformation(LogEvents.NewUpload, "New upload of file with id {0}", identifier);

    // We already have the ID, so we can record some initial progress
    UploadProgress progress = new UploadProgress
    {
        Current = 0,
        StartTime = DateTime.UtcNow,
        Total = context.Request.ContentLength ?? -1
    };
    this._uploadProgressManager.SetProgress(identifier, progress);

    // Initialize reading the request
    MediaTypeHeaderValue contentType = GetContentType(context);
    string boundary = GetBoundary(contentType);
    MultipartReader reader = new MultipartReader(boundary, context.Request.Body);
    reader.BodyLengthLimit = (long?)this._fileStoreOptions.Value?.MaximumFileSize.Megabytes().Bytes;

    // Delegate actual request parsing
    // ... after the request "completes" we re-execute to send the final response to the browser
    try
    {
        await using (context.RequestAborted.Register(context.Abort))
        {
            await this._uploadManager.StoreAsync(identifier, reader, context.RequestAborted);
        }

        PrepForReExecute(context, new UploadErrorsModel());
    }
    catch (UploadCryptoArgumentOrderException)
    {
        PrepForReExecute(context, UploadErrorsModel.CreateFromMessage("Invalid order of cryptographic parameters: file was uploaded before password."));
    }
    catch (Exception ex)
    {
        UploadErrorsModel errors = UploadErrorsModel.CreateFromMessage(ex.Message);

        this._logger.LogError(LogEvents.UploadFailed, "Detected failed upload - passing error to child handler: {0}", ex);

        PrepForReExecute(context, errors);
    }

    await ReExecuteAsync(context);
}
private async Task UploadToFtp(
    FtpSettings settings,
    Stream stream,
    string folderName,
    string fileName,
    UploadProgress uploadProgress)
{
    using (var client = new RavenFtpClient(settings.Url, settings.Port, settings.UserName,
        settings.Password, settings.CertificateAsBase64, settings.CertificateFileName,
        uploadProgress, _cancellationToken.Token))
    {
        await client.UploadFile(folderName, fileName, stream);

        if (_logger.IsInfoEnabled)
        {
            _logger.Info($"Successfully uploaded backup file '{fileName}' to an FTP server");
        }
    }
}
/// <summary>
/// Verifies the progress status.
/// </summary>
/// <param name="progress">The upload progress.</param>
/// <param name="fileLength">The file length.</param>
private void VerifyProgressStatus(UploadProgress progress, long fileLength)
{
    Assert.Equal(fileLength, progress.TotalFileLength);
    Assert.True(1 < progress.TotalSegmentCount, "UploadProgress: Unexpected value for TotalSegmentCount");
    Assert.Equal(progress.TotalFileLength, progress.UploadedByteCount);

    long uploadedByteSum = 0;
    for (int i = 0; i < progress.TotalSegmentCount; i++)
    {
        var segmentProgress = progress.GetSegmentProgress(i);
        Assert.False(segmentProgress.IsFailed, string.Format("UploadProgress: Segment {0} seems to have failed", i));
        Assert.Equal(i, segmentProgress.SegmentNumber);
        Assert.Equal(segmentProgress.Length, segmentProgress.UploadedByteCount);

        uploadedByteSum += segmentProgress.UploadedByteCount;
    }

    Assert.Equal(progress.UploadedByteCount, uploadedByteSum);
}