/// <summary>
/// Handles a folder-upload progress notification by writing the current
/// percentage, file count, and byte count to the console.
/// </summary>
/// <param name="sender">The event source (unused).</param>
/// <param name="e">The folder progress snapshot being reported.</param>
private static void UploadFolderProgressChanged(object sender, UploadFolderProgress e)
{
    // Nothing meaningful to report until at least one byte has been uploaded.
    if (e.UploadedByteCount == 0)
    {
        return;
    }

    var percentComplete = (double)e.UploadedByteCount / e.TotalFileLength * 100.0;
    Console.WriteLine($"{percentComplete:0.##}%, {e.UploadedFileCount}/{e.TotalFileCount} files, {e.UploadedByteCount}/{e.TotalFileLength} bytes");
}
/// <summary>
/// Asserts that a folder-upload progress object reports a fully completed upload:
/// the expected totals, all files uploaded, all bytes uploaded, and every
/// per-file segment individually complete.
/// </summary>
/// <param name="progress">The folder progress to verify.</param>
/// <param name="totalFileLength">The expected total byte length across all files.</param>
/// <param name="totalFiles">The expected total number of files.</param>
private void VerifyFolderProgressStatus(UploadFolderProgress progress, long totalFileLength, int totalFiles)
{
    Assert.Equal(totalFileLength, progress.TotalFileLength);
    Assert.Equal(totalFiles, progress.TotalFileCount);
    Assert.Equal(progress.TotalFileCount, progress.UploadedFileCount);
    Assert.Equal(progress.TotalFileLength, progress.UploadedByteCount);

    // Each file's segment progress must also report itself as fully uploaded.
    for (int fileIndex = 0; fileIndex < progress.TotalFileCount; fileIndex++)
    {
        var segmentProgress = progress.GetSegmentProgress(fileIndex);
        VerifyProgressStatus(segmentProgress, segmentProgress.TotalFileLength);
    }
}
/// <summary>
/// Uploads a folder from scratch with progress tracking, verifies the upload
/// and its reported progress, then downloads the same folder back (with the
/// front end forced to treat the target path as a directory) and verifies the
/// download and its reported progress.
/// </summary>
public void DataLakeUploader_FreshFolderUploadDownload()
{
    var backend = new InMemoryFrontEnd();
    var parameters = CreateParameters(isResume: false, isRecursive: true);

    UploadFolderProgress latestProgress = null;
    var progressLock = new object();
    IProgress<UploadFolderProgress> progressTracker = new Progress<UploadFolderProgress>(
        reported =>
        {
            lock (progressLock)
            {
                // Reports can arrive out of order (multiple threads report at the
                // same time); only keep the one showing actual forward progress.
                if (latestProgress == null || latestProgress.UploadedByteCount < reported.UploadedByteCount)
                {
                    latestProgress = reported;
                }
            }
        });

    var uploader = new DataLakeStoreUploader(parameters, backend, null, progressTracker);
    uploader.Execute();
    VerifyFileUploadedSuccessfully(parameters, backend);
    VerifyFolderProgressStatus(latestProgress, _largeFileData.Length + (_smallFileData.Length * 2), 3);

    // Now download. Wrap the in-memory front end so the target stream path is
    // reported as a directory.
    var downloadFrontEnd = new MockableFrontEnd(backend);
    downloadFrontEnd.IsDirectoryImplementation = streamPath => true;

    latestProgress = null;
    parameters = CreateParameters(
        isRecursive: true,
        isResume: false,
        isDownload: true,
        targetStreamPath: Path.GetDirectoryName(_downloadFilePath),
        isOverwrite: true,
        filePath: TargetStreamPath);

    uploader = new DataLakeStoreUploader(parameters, downloadFrontEnd, null, progressTracker);
    uploader.Execute();
    VerifyFileUploadedSuccessfully(parameters, downloadFrontEnd.BaseAdapter);
    VerifyFolderProgressStatus(latestProgress, _largeFileData.Length + (_smallFileData.Length * 2), 3);
}
/// <summary>
/// Verifies that a folder upload that fails partway (only one target stream is
/// allowed to be created) fails promptly rather than hanging, and that the
/// upload can then be resumed against the real back end and completed with
/// correct folder progress reporting.
/// </summary>
public void DataLakeUploader_ResumePartialFolderUploadWithProgress()
{
    //attempt to load the file fully, but only allow creating 1 target stream
    var backingFrontEnd = new InMemoryFrontEnd();
    var frontEnd = new MockableFrontEnd(backingFrontEnd);

    UploadFolderProgress progress = null;
    var syncRoot = new object();
    IProgress<UploadFolderProgress> progressTracker = new Progress<UploadFolderProgress>(
        (p) =>
        {
            lock (syncRoot)
            {
                //it is possible that these come out of order because of race conditions (multiple threads reporting at the same time); only update if we are actually making progress
                if (progress == null || progress.UploadedByteCount < p.UploadedByteCount)
                {
                    progress = p;
                }
            }
        });

    int createStreamCount = 0;
    frontEnd.CreateStreamImplementation = (path, overwrite, data, byteCount) =>
    {
        createStreamCount++;
        if (createStreamCount > 1)
        {
            //we only allow 1 file to be created
            throw new IntentionalException();
        }
        backingFrontEnd.CreateStream(path, overwrite, data, byteCount);
    };

    var up = CreateParameters(isResume: false, isRecursive: true);
    var uploader = new DataLakeStoreUploader(up, frontEnd, folderProgressTracker: progressTracker);
    uploader.DeleteMetadataFile();

    // Verifies that a bug in folder upload with progress hung on failure is fixed.
    // FIX: the failure outcome is captured and asserted OUTSIDE the try/catch.
    // Previously, Assert.True(false, ...) inside the try was itself caught by
    // catch (Exception ex) and re-reported as "not the expected aggregate
    // exception", masking the real reason the test failed.
    Exception observedException = null;
    bool finishedInTime;
    try
    {
        var uploadTask = Task.Run(() => { uploader.Execute(); });
        finishedInTime = uploadTask.Wait(TimeSpan.FromSeconds(60));
    }
    catch (Exception ex)
    {
        // Task.Wait wraps faults from the upload task in an AggregateException.
        observedException = ex;
        finishedInTime = true;
    }

    Assert.True(finishedInTime && observedException != null, "Folder upload did not fail after error in less than 60 seconds");
    Assert.True(observedException is AggregateException, "The exception thrown by upload was not the expected aggregate exception.");

    // Exactly one file should have made it to the back end before the failure.
    Assert.Equal(1, frontEnd.ListDirectory(up.TargetStreamPath, false).Keys.Count);
    Assert.Equal(1, backingFrontEnd.StreamCount);

    //resume the upload but point it to the real back-end, which doesn't throw exceptions
    up = CreateParameters(isResume: true, isRecursive: true);
    uploader = new DataLakeStoreUploader(up, backingFrontEnd, folderProgressTracker: progressTracker);
    try
    {
        var uploadTask = Task.Run(() => { uploader.Execute(); });
        uploadTask.Wait(TimeSpan.FromSeconds(60));
        Assert.True(uploadTask.IsCompleted, "Folder upload did not complete after error in less than 60 seconds");
    }
    finally
    {
        uploader.DeleteMetadataFile();
    }

    VerifyFileUploadedSuccessfully(up, backingFrontEnd);
    VerifyFolderProgressStatus(progress, _largeFileData.Length + (_smallFileData.Length * 2), 3);
}