public void DataLakeUploader_ResumePartialFolderUpload()
{
    // attempt to load the file fully, but only allow creating 1 target stream
    var backingFrontEnd = new InMemoryFrontEnd();
    var frontEnd = new MockableFrontEnd(backingFrontEnd);

    int createStreamCount = 0;
    frontEnd.CreateStreamImplementation = (path, overwrite, data, byteCount) =>
    {
        createStreamCount++;
        if (createStreamCount > 1)
        {
            // we only allow 1 file to be created
            throw new IntentionalException();
        }

        backingFrontEnd.CreateStream(path, overwrite, data, byteCount);
    };

    var up = CreateParameters(isResume: false, isRecursive: true);
    var uploader = new DataLakeStoreUploader(up, frontEnd);
    uploader.DeleteMetadataFile();

    Assert.Throws<AggregateException>(() => uploader.Execute());
    Assert.Equal(1, frontEnd.ListDirectory(up.TargetStreamPath, false).Keys.Count);
    Assert.Equal(1, backingFrontEnd.StreamCount);

    // resume the upload but point it to the real back-end, which doesn't throw exceptions
    up = CreateParameters(isResume: true, isRecursive: true);
    uploader = new DataLakeStoreUploader(up, backingFrontEnd);

    try
    {
        uploader.Execute();
    }
    finally
    {
        uploader.DeleteMetadataFile();
    }

    VerifyFileUploadedSuccessfully(up, backingFrontEnd);
}
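// The failure injection in these tests relies on a test-only exception type thrown by the
// mocked front end. A minimal sketch is shown here for illustration only; the actual test
// fixture defines its own IntentionalException, which may differ from this shape.
private class IntentionalException : Exception
{
    // Thrown by a MockableFrontEnd delegate to simulate a failure partway through an
    // upload or download, forcing the uploader to leave resume metadata behind.
}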
public void DataLakeUploader_ResumePartialUploadDownload()
{
    // attempt to load the file fully, but only allow creating 1 target stream
    var backingFrontEnd = new InMemoryFrontEnd();
    var frontEnd = new MockableFrontEnd(backingFrontEnd);

    int createStreamCount = 0;
    frontEnd.CreateStreamImplementation = (path, overwrite, data, byteCount) =>
    {
        createStreamCount++;
        if (createStreamCount > 1)
        {
            // we only allow 1 file to be created
            throw new IntentionalException();
        }

        backingFrontEnd.CreateStream(path, overwrite, data, byteCount);
    };

    var up = CreateParameters(isResume: false);
    var uploader = new DataLakeStoreUploader(up, frontEnd);
    uploader.DeleteMetadataFile();

    Assert.Throws<AggregateException>(() => uploader.Execute());
    Assert.Equal(1, frontEnd.ListDirectory(up.TargetStreamPath, false).Keys.Count);
    Assert.Equal(1, backingFrontEnd.StreamCount);

    // resume the upload but point it to the real back-end, which doesn't throw exceptions
    up = CreateParameters(isResume: true);
    uploader = new DataLakeStoreUploader(up, backingFrontEnd);

    try
    {
        uploader.Execute();
    }
    finally
    {
        uploader.DeleteMetadataFile();
    }

    VerifyFileUploadedSuccessfully(up, backingFrontEnd);

    // now download the same way.
    var frontEnd2 = new MockableFrontEnd(backingFrontEnd); // need to have data from the successful upload available.
    createStreamCount = 0;
    frontEnd2.ReadStreamImplementation = (path, data, byteCount, isDownload) =>
    {
        createStreamCount++;
        if (createStreamCount > 1)
        {
            // we only allow 1 file to be created
            throw new IntentionalException();
        }

        return backingFrontEnd.ReadStream(path, data, byteCount, isDownload);
    };

    up = CreateParameters(isResume: false, isDownload: true, targetStreamPath: _downloadFilePath, isOverwrite: true, filePath: up.TargetStreamPath);
    uploader = new DataLakeStoreUploader(up, frontEnd2);

    Assert.Throws<AggregateException>(() => uploader.Execute());
    Assert.False(frontEnd2.StreamExists(up.TargetStreamPath), "Target stream should not have been created");

    // now use the good front end
    up = CreateParameters(isResume: true, isDownload: true, targetStreamPath: _downloadFilePath, isOverwrite: true, filePath: up.InputFilePath);
    uploader = new DataLakeStoreUploader(up, backingFrontEnd);

    // resume the download but point it to the real back-end, which doesn't throw exceptions
    try
    {
        uploader.Execute();
    }
    finally
    {
        uploader.DeleteMetadataFile();
    }

    VerifyFileUploadedSuccessfully(up, backingFrontEnd);
}
public void DataLakeUploader_ResumePartialFolderUploadWithProgress()
{
    // attempt to load the file fully, but only allow creating 1 target stream
    var backingFrontEnd = new InMemoryFrontEnd();
    var frontEnd = new MockableFrontEnd(backingFrontEnd);
    UploadFolderProgress progress = null;
    var syncRoot = new object();
    IProgress<UploadFolderProgress> progressTracker = new Progress<UploadFolderProgress>(
        (p) =>
        {
            lock (syncRoot)
            {
                // reports can arrive out of order because of race conditions (multiple threads
                // reporting at the same time); only update if we are actually making progress
                if (progress == null || progress.UploadedByteCount < p.UploadedByteCount)
                {
                    progress = p;
                }
            }
        });

    int createStreamCount = 0;
    frontEnd.CreateStreamImplementation = (path, overwrite, data, byteCount) =>
    {
        createStreamCount++;
        if (createStreamCount > 1)
        {
            // we only allow 1 file to be created
            throw new IntentionalException();
        }

        backingFrontEnd.CreateStream(path, overwrite, data, byteCount);
    };

    var up = CreateParameters(isResume: false, isRecursive: true);
    var uploader = new DataLakeStoreUploader(up, frontEnd, folderProgressTracker: progressTracker);
    uploader.DeleteMetadataFile();

    // Verifies the fix for a bug where folder upload with progress tracking hung on failure.
    try
    {
        var uploadTask = Task.Run(() => { uploader.Execute(); });
        uploadTask.Wait(TimeSpan.FromSeconds(60));
        Assert.True(false, "Folder upload did not fail after error in less than 60 seconds");
    }
    catch (Exception ex)
    {
        Assert.True(ex is AggregateException, "The exception thrown by upload was not the expected aggregate exception.");
    }

    Assert.Equal(1, frontEnd.ListDirectory(up.TargetStreamPath, false).Keys.Count);
    Assert.Equal(1, backingFrontEnd.StreamCount);

    // resume the upload but point it to the real back-end, which doesn't throw exceptions
    up = CreateParameters(isResume: true, isRecursive: true);
    uploader = new DataLakeStoreUploader(up, backingFrontEnd, folderProgressTracker: progressTracker);

    try
    {
        var uploadTask = Task.Run(() => { uploader.Execute(); });
        uploadTask.Wait(TimeSpan.FromSeconds(60));
        Assert.True(uploadTask.IsCompleted, "Folder upload did not complete after error in less than 60 seconds");
    }
    finally
    {
        uploader.DeleteMetadataFile();
    }

    VerifyFileUploadedSuccessfully(up, backingFrontEnd);
    VerifyFolderProgressStatus(progress, _largeFileData.Length + (_smallFileData.Length * 2), 3);
}
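// A minimal sketch of what the folder-progress verification above could look like. The call
// passes the expected total byte count and expected file count; the property names used here
// (TotalFileLength, TotalFileCount, UploadedFileCount) are assumptions apart from
// UploadedByteCount, which the test itself reads, and the real helper may check more.
private static void VerifyFolderProgressStatus(UploadFolderProgress progress, long expectedByteCount, int expectedFileCount)
{
    // the final progress report should account for every byte and every file in the folder
    Assert.Equal(expectedByteCount, progress.TotalFileLength);
    Assert.Equal(expectedFileCount, progress.TotalFileCount);
    Assert.Equal(expectedByteCount, progress.UploadedByteCount);
    Assert.Equal(expectedFileCount, progress.UploadedFileCount);
}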