Example #1
        public void DataLakeUploader_ResumePartialUpload()
        {
            //attempt to upload the file fully, but only allow creating 1 target stream
            var backingFrontEnd = new InMemoryFrontEnd();
            var frontEnd        = new MockableFrontEnd(backingFrontEnd);

            int createStreamCount = 0;

            frontEnd.CreateStreamImplementation = (path, overwrite, data, byteCount) =>
            {
                createStreamCount++;
                if (createStreamCount > 1)
                {
                    //we only allow 1 file to be created
                    throw new IntentionalException();
                }
                backingFrontEnd.CreateStream(path, overwrite, data, byteCount);
            };
            var up       = CreateParameters(isResume: false);
            var uploader = new DataLakeStoreUploader(up, frontEnd);

            uploader.DeleteMetadataFile();

            Assert.Throws<AggregateException>(() => uploader.Execute());
            Assert.False(frontEnd.StreamExists(up.TargetStreamPath), "Target stream should not have been created");
            Assert.Equal(1, backingFrontEnd.StreamCount);

            //resume the upload but point it to the real back-end, which doesn't throw exceptions
            up       = CreateParameters(isResume: true);
            uploader = new DataLakeStoreUploader(up, backingFrontEnd);

            try
            {
                uploader.Execute();
            }
            finally
            {
                uploader.DeleteMetadataFile();
            }

            VerifyFileUploadedSuccessfully(up, backingFrontEnd);
        }
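
The example above leans on two helpers that this page does not show: IntentionalException and the delegate-based MockableFrontEnd. A minimal sketch of plausible shapes for them follows; it assumes MockableFrontEnd simply forwards to the wrapped InMemoryFrontEnd whenever no override is set, and is illustrative rather than the SDK's actual source.

        // Illustrative sketches; not the SDK's actual definitions.
        public class IntentionalException : Exception { }

        public class MockableFrontEnd
        {
            private readonly InMemoryFrontEnd _backing;

            public MockableFrontEnd(InMemoryFrontEnd backing)
            {
                _backing = backing;
            }

            // When set, replaces the backing CreateStream call (as the test above does).
            public Action<string, bool, byte[], int> CreateStreamImplementation { get; set; }

            public void CreateStream(string path, bool overwrite, byte[] data, int byteCount)
            {
                if (CreateStreamImplementation != null)
                {
                    CreateStreamImplementation(path, overwrite, data, byteCount);
                }
                else
                {
                    _backing.CreateStream(path, overwrite, data, byteCount);
                }
            }

            // StreamExists, Concatenate, ReadStream, etc. would follow the same pattern.
        }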
Example #2
        public void DataLakeUploader_ResumeUploadWithAllMissingFiles()
        {
            //this scenario is achieved by refusing to execute the concat command on the front end for the initial upload (which will interrupt it)
            //and then resuming the upload against a fresh front-end (which obviously has no files there)

            var backingFrontEnd1 = new InMemoryFrontEnd();
            var frontEnd1        = new MockableFrontEnd(backingFrontEnd1);

            frontEnd1.ConcatenateImplementation = (target, inputs) => { throw new IntentionalException(); }; //fail the concatenation

            //attempt full upload
            var up       = CreateParameters(isResume: false);
            var uploader = new DataLakeStoreUploader(up, frontEnd1);

            uploader.DeleteMetadataFile();

            Assert.Throws<IntentionalException>(() => uploader.Execute());
            Assert.False(frontEnd1.StreamExists(up.TargetStreamPath), "Target stream should not have been created");
            Assert.True(0 < backingFrontEnd1.StreamCount, "No temporary streams seem to have been created");

            //attempt to resume the upload
            var frontEnd2 = new InMemoryFrontEnd();

            up       = CreateParameters(isResume: true);
            uploader = new DataLakeStoreUploader(up, frontEnd2);

            //at this point the metadata exists locally but there are no target files in frontEnd2
            try
            {
                uploader.Execute();
            }
            finally
            {
                uploader.DeleteMetadataFile();
            }

            VerifyFileUploadedSuccessfully(up, frontEnd2);
        }
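
Every test builds its parameters through a CreateParameters helper, also not shown on this page. A hedged sketch follows, assuming an UploadParameters constructor with named arguments matching the flags used above; the _largeFilePath and _targetStreamPath fields and the account name are hypothetical stand-ins for whatever the real fixture defines.

        // Hypothetical helper; the real fixture supplies its own paths and defaults.
        private UploadParameters CreateParameters(
            bool isResume,
            bool isRecursive = false,
            bool isDownload = false,
            bool isOverwrite = false,
            string targetStreamPath = null,
            string filePath = null)
        {
            return new UploadParameters(
                inputFilePath: filePath ?? _largeFilePath,               // hypothetical field
                targetStreamPath: targetStreamPath ?? _targetStreamPath, // hypothetical field
                accountName: "test-account",                             // hypothetical value
                isOverwrite: isOverwrite,
                isResume: isResume,
                isRecursive: isRecursive,
                isDownload: isDownload);
        }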
Example #3
        public void DataLakeUploader_ResumePartialUploadDownload()
        {
            //attempt to upload the file fully, but only allow creating 1 target stream
            var backingFrontEnd = new InMemoryFrontEnd();
            var frontEnd        = new MockableFrontEnd(backingFrontEnd);

            int createStreamCount = 0;

            frontEnd.CreateStreamImplementation = (path, overwrite, data, byteCount) =>
            {
                createStreamCount++;
                if (createStreamCount > 1)
                {
                    //we only allow 1 file to be created
                    throw new IntentionalException();
                }
                backingFrontEnd.CreateStream(path, overwrite, data, byteCount);
            };

            var up       = CreateParameters(isResume: false);
            var uploader = new DataLakeStoreUploader(up, frontEnd);

            uploader.DeleteMetadataFile();

            Assert.Throws<AggregateException>(() => uploader.Execute());
            Assert.Equal(1, frontEnd.ListDirectory(up.TargetStreamPath, false).Keys.Count);
            Assert.Equal(1, backingFrontEnd.StreamCount);

            //resume the upload but point it to the real back-end, which doesn't throw exceptions
            up       = CreateParameters(isResume: true);
            uploader = new DataLakeStoreUploader(up, backingFrontEnd);

            try
            {
                uploader.Execute();
            }
            finally
            {
                uploader.DeleteMetadataFile();
            }

            VerifyFileUploadedSuccessfully(up, backingFrontEnd);

            // now download the same way.
            var frontEnd2 = new MockableFrontEnd(backingFrontEnd); // need to have data from the successful upload available.

            createStreamCount = 0;
            frontEnd2.ReadStreamImplementation = (path, data, byteCount, isDownload) =>
            {
                createStreamCount++;
                if (createStreamCount > 1)
                {
                    //we only allow the first read to succeed
                    throw new IntentionalException();
                }
                return backingFrontEnd.ReadStream(path, data, byteCount, isDownload);
            };

            up       = CreateParameters(isResume: false, isDownload: true, targetStreamPath: _downloadFilePath, isOverwrite: true, filePath: up.TargetStreamPath);
            uploader = new DataLakeStoreUploader(up, frontEnd2);

            Assert.Throws<AggregateException>(() => uploader.Execute());
            Assert.False(frontEnd2.StreamExists(up.TargetStreamPath), "Target stream should not have been created");

            // now use the good front end
            up       = CreateParameters(isResume: true, isDownload: true, targetStreamPath: _downloadFilePath, isOverwrite: true, filePath: up.InputFilePath);
            uploader = new DataLakeStoreUploader(up, backingFrontEnd);

            //resume the download but point it to the real back-end, which doesn't throw exceptions
            try
            {
                uploader.Execute();
            }
            finally
            {
                uploader.DeleteMetadataFile();
            }

            VerifyFileUploadedSuccessfully(up, backingFrontEnd);
        }
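
VerifyFileUploadedSuccessfully is the fixture's own assertion helper. An illustrative version for the upload case is sketched below, assuming the front end exposes StreamExists and GetStreamLength (the UploadParameters properties TargetStreamPath and InputFilePath both appear in the tests above); it requires System.IO for FileInfo.

        // Illustrative check: the target stream exists and matches the source length.
        private void VerifyFileUploadedSuccessfully(UploadParameters up, InMemoryFrontEnd frontEnd)
        {
            Assert.True(frontEnd.StreamExists(up.TargetStreamPath), "Uploaded stream does not exist");
            long expectedLength = new FileInfo(up.InputFilePath).Length;
            Assert.Equal(expectedLength, frontEnd.GetStreamLength(up.TargetStreamPath)); // GetStreamLength assumed
        }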
Example #4
        public void DataLakeUploader_ResumePartialFolderUploadWithProgress()
        {
            //attempt to upload the folder fully, but only allow creating 1 target stream
            var backingFrontEnd           = new InMemoryFrontEnd();
            var frontEnd                  = new MockableFrontEnd(backingFrontEnd);
            UploadFolderProgress progress = null;
            var syncRoot                  = new object();
            IProgress<UploadFolderProgress> progressTracker = new Progress<UploadFolderProgress>(
                (p) =>
                {
                    lock (syncRoot)
                    {
                        //it is possible that these come out of order because of race conditions (multiple threads reporting at the same time); only update if we are actually making progress
                        if (progress == null || progress.UploadedByteCount < p.UploadedByteCount)
                        {
                            progress = p;
                        }
                    }
                });
            int createStreamCount = 0;

            frontEnd.CreateStreamImplementation = (path, overwrite, data, byteCount) =>
            {
                createStreamCount++;
                if (createStreamCount > 1)
                {
                    //we only allow 1 file to be created
                    throw new IntentionalException();
                }
                backingFrontEnd.CreateStream(path, overwrite, data, byteCount);
            };
            var up       = CreateParameters(isResume: false, isRecursive: true);
            var uploader = new DataLakeStoreUploader(up, frontEnd, folderProgressTracker: progressTracker);

            uploader.DeleteMetadataFile();

            // Verifies the fix for a bug where a folder upload with progress reporting hung on failure.
            try
            {
                var uploadTask = Task.Run(() =>
                {
                    uploader.Execute();
                });

                uploadTask.Wait(TimeSpan.FromSeconds(60));
                Assert.True(false, "Folder upload did not fail within 60 seconds after the error");
            }
            catch (Exception ex)
            {
                Assert.True(ex is AggregateException, "The exception thrown by upload was not the expected aggregate exception.");
            }

            Assert.Equal(1, frontEnd.ListDirectory(up.TargetStreamPath, false).Keys.Count);
            Assert.Equal(1, backingFrontEnd.StreamCount);

            //resume the upload but point it to the real back-end, which doesn't throw exceptions
            up       = CreateParameters(isResume: true, isRecursive: true);
            uploader = new DataLakeStoreUploader(up, backingFrontEnd, folderProgressTracker: progressTracker);

            try
            {
                var uploadTask = Task.Run(() =>
                {
                    uploader.Execute();
                });

                uploadTask.Wait(TimeSpan.FromSeconds(60));
                Assert.True(uploadTask.IsCompleted, "Folder upload did not complete within 60 seconds after the error");
            }
            finally
            {
                uploader.DeleteMetadataFile();
            }

            VerifyFileUploadedSuccessfully(up, backingFrontEnd);
            VerifyFolderProgressStatus(progress, _largeFileData.Length + (_smallFileData.Length * 2), 3);
        }
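
VerifyFolderProgressStatus checks the final progress snapshot against the expected totals (here, one large file plus two small ones). A minimal sketch follows, assuming UploadFolderProgress exposes TotalFileLength, TotalFileCount, and UploadedFileCount alongside the UploadedByteCount already used in the callback above.

        // Illustrative assertions over the final folder-progress snapshot.
        private void VerifyFolderProgressStatus(UploadFolderProgress progress, long expectedByteCount, int expectedFileCount)
        {
            Assert.NotNull(progress);
            Assert.Equal(expectedByteCount, progress.UploadedByteCount);
            Assert.Equal(expectedByteCount, progress.TotalFileLength);   // assumed property
            Assert.Equal(expectedFileCount, progress.TotalFileCount);    // assumed property
            Assert.Equal(expectedFileCount, progress.UploadedFileCount); // assumed property
        }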