Example #1
0
        public void SingleSegmentUploader_UploadNonBinaryFile()
        {
            // Upload a text (non-binary) file and verify both the contents and that
            // append blocks are aligned to record ('\n') boundaries.
            var frontEnd = new InMemoryFrontEnd();
            var metadata = CreateMetadata(_textFilePath, _textFileContents.Length);
            metadata.IsBinary = false;

            var tracker  = new TestProgressTracker();
            var uploader = new SingleSegmentUploader(0, metadata, frontEnd, tracker);
            uploader.UseBackOffRetryStrategy = false;
            uploader.Upload();

            //verify the entire file is identical to the source file
            var uploadedBytes = frontEnd.GetStreamContents(StreamPath);
            AssertExtensions.AreEqual(_textFileContents, uploadedBytes, "Unexpected uploaded stream contents.");

            //verify the append blocks start/end on record boundaries (every block
            //except the last one must end with a newline)
            int bytesSeen = 0;
            foreach (var block in frontEnd.GetAppendBlocks(StreamPath))
            {
                bytesSeen += block.Length;
                if (bytesSeen < uploadedBytes.Length)
                {
                    Assert.Equal('\n', (char)block[block.Length - 1]);
                }
            }

            VerifyTracker(tracker, true);
        }
        public void DataLakeUploader_UploadDownloadSingleSegment()
        {
            // Round-trip a small file that fits into a single 4MB segment:
            // upload it, verify, then download it back and verify again.
            var frontEnd   = new InMemoryFrontEnd();
            var parameters = new TransferParameters(
                inputFilePath: _smallFilePath,
                targetStreamPath: "1",
                perFileThreadCount: ThreadCount,
                accountName: "foo",
                isResume: false,
                maxSegmentLength: 4 * 1024 * 1024,
                localMetadataLocation: Path.GetTempPath());

            File.WriteAllBytes(_smallFilePath, _smallFileData);

            var client = new DataLakeStoreTransferClient(parameters, frontEnd);
            client.Execute();
            VerifyFileUploadedSuccessfully(parameters, frontEnd, _smallFileData);

            // now download the stream we just uploaded back to a local file
            parameters = new TransferParameters(
                inputFilePath: "1",
                targetStreamPath: _downloadFilePath,
                perFileThreadCount: ThreadCount,
                accountName: "foo",
                isResume: false,
                isOverwrite: true,
                isDownload: true,
                maxSegmentLength: 4 * 1024 * 1024,
                localMetadataLocation: Path.GetTempPath());

            client = new DataLakeStoreTransferClient(parameters, frontEnd);
            client.Execute();
            VerifyFileUploadedSuccessfully(parameters, frontEnd, _smallFileData);
        }
        public void MultipleSegmentUploader_ResumedUploadWithMultipleSegments()
        {
            //the strategy here is to upload everything, then delete a set of the segments, and verify that a resume will pick up the slack
            var frontEnd = new InMemoryFrontEnd();
            var metadata = CreateMetadata(10);

            try
            {
                var uploader = new MultipleSegmentUploader(metadata, 1, frontEnd);
                uploader.UseSegmentBlockBackOffRetryStrategy = false;
                uploader.Upload();
                VerifyTargetStreamsAreComplete(metadata, frontEnd);

                //delete about 50% of segments: mark every even-indexed segment as
                //pending again and remove its stream from the front end
                for (int index = 0; index < metadata.SegmentCount; index++)
                {
                    if (index % 2 != 0)
                    {
                        continue;
                    }

                    var segment = metadata.Segments[index];
                    segment.Status = SegmentTransferStatus.Pending;
                    frontEnd.DeleteStream(segment.Path);
                }

                //re-upload everything; only the deleted segments should need work
                uploader = new MultipleSegmentUploader(metadata, 1, frontEnd);
                uploader.Upload();
                VerifyTargetStreamsAreComplete(metadata, frontEnd);
            }
            finally
            {
                metadata.DeleteFile();
            }
        }
 /// <summary>
 /// Asserts that every segment recorded in the metadata has a complete target stream.
 /// </summary>
 /// <param name="metadata">The transfer metadata describing the segments.</param>
 /// <param name="fe">The in-memory front end holding the uploaded streams.</param>
 private void VerifyTargetStreamsAreComplete(TransferMetadata metadata, InMemoryFrontEnd fe)
 {
     foreach (var currentSegment in metadata.Segments)
     {
         VerifyTargetStreamIsComplete(currentSegment, metadata, fe);
     }
 }
        public void DataLakeUploader_CancelUpload()
        {
            // Verifies that cancelling the token stops the transfer before the target
            // stream is completely created.
            CancellationTokenSource myTokenSource = new CancellationTokenSource();
            var cancelToken    = myTokenSource.Token;
            var frontEnd       = new InMemoryFrontEnd();
            var mockedFrontend = new MockableFrontEnd(frontEnd);

            mockedFrontend.GetStreamLengthImplementation = (streamPath, isDownload) =>
            {
                // sleep for 2 seconds to allow for the cancellation to actually happen
                Thread.Sleep(2000);
                return(frontEnd.GetStreamLength(streamPath, isDownload));
            };

            mockedFrontend.StreamExistsImplementation = (streamPath, isDownload) =>
            {
                // sleep for 2 seconds to allow for the cancellation to actually happen
                Thread.Sleep(2000);
                return(frontEnd.StreamExists(streamPath, isDownload));
            };
            var up = CreateParameters(isResume: false);
            TransferProgress progress = null;
            var syncRoot = new object();
            IProgress <TransferProgress> progressTracker = new Progress <TransferProgress>(
                (p) =>
            {
                lock (syncRoot)
                {
                    //it is possible that these come out of order because of race conditions (multiple threads reporting at the same time); only update if we are actually making progress
                    if (progress == null || progress.TransferredByteCount < p.TransferredByteCount)
                    {
                        progress = p;
                    }
                }
            });
            var uploader = new DataLakeStoreTransferClient(up, mockedFrontend, cancelToken, progressTracker);

            Task uploadTask = Task.Run(() =>
            {
                uploader.Execute();
                Thread.Sleep(2000);
            }, cancelToken);

            myTokenSource.Cancel();
            Assert.True(cancelToken.IsCancellationRequested);

            // Wait for the task to complete instead of busy-polling its Status; this
            // also observes the task's exception (cancellation typically surfaces as
            // an AggregateException) so it does not become an unobserved task exception.
            try
            {
                uploadTask.Wait(TimeSpan.FromSeconds(30));
            }
            catch (AggregateException)
            {
                // expected when the transfer is cancelled mid-flight
            }

            // Verify that the file did not get uploaded completely.
            Assert.False(frontEnd.StreamExists(up.TargetStreamPath), "Uploaded stream exists when it should not yet have been completely created");
        }
Example #6
0
        public void SingleSegmentUploader_UploadNonBinaryFileTooLargeRecord()
        {
            // A text file containing a record that is too large must fail the upload.
            var frontEnd = new InMemoryFrontEnd();
            var metadata = CreateMetadata(_badTextFilePath, _badTextFileContents.Length);
            metadata.IsBinary = false;

            var tracker  = new TestProgressTracker();
            var uploader = new SingleSegmentUploader(0, metadata, frontEnd, tracker);
            uploader.UseBackOffRetryStrategy = false;

            Assert.Throws <TransferFailedException>(() => uploader.Upload());
        }
        /// <summary>
        /// Constructor with base front end.
        /// </summary>
        /// <param name="baseAdapter">The front end.</param>
        /// <summary>
        /// Constructor with base front end; every operation initially delegates to it
        /// until a test replaces the corresponding implementation delegate.
        /// </summary>
        /// <param name="baseAdapter">The front end to wrap.</param>
        public MockableFrontEnd(IFrontEndAdapter baseAdapter)
        {
            AppendToStreamImplementation  = baseAdapter.AppendToStream;
            ConcatenateImplementation     = baseAdapter.Concatenate;
            CreateStreamImplementation    = baseAdapter.CreateStream;
            DeleteStreamImplementation    = baseAdapter.DeleteStream;
            GetStreamLengthImplementation = baseAdapter.GetStreamLength;
            IsDirectoryImplementation     = baseAdapter.IsDirectory;
            ListDirectoryImplementation   = baseAdapter.ListDirectory;
            ReadStreamImplementation      = baseAdapter.ReadStream;
            StreamExistsImplementation    = baseAdapter.StreamExists;

            // Only populated when the wrapped adapter is the in-memory test front end;
            // otherwise the 'as' cast leaves this null.
            BaseAdapter = baseAdapter as InMemoryFrontEnd;
        }
Example #8
0
        internal void TestRetryBlock(int failCount)
        {
            // Simulates a front end whose first <failCount> calls throw; the uploader
            // should still succeed as long as the failures stay below its retry limit.
            bool shouldSucceed = failCount < SingleSegmentUploader.MaxBufferUploadAttemptCount;
            int  attempts      = 0;

            var workingFrontEnd = new InMemoryFrontEnd();
            var faultyFrontEnd  = new MockableFrontEnd(workingFrontEnd);

            faultyFrontEnd.CreateStreamImplementation = (streamPath, overwrite, data, byteCount) =>
            {
                if (++attempts <= failCount)
                {
                    throw new IntentionalException();
                }
                workingFrontEnd.CreateStream(streamPath, overwrite, data, byteCount);
            };

            faultyFrontEnd.AppendToStreamImplementation = (streamPath, data, offset, byteCount) =>
            {
                if (++attempts <= failCount)
                {
                    throw new IntentionalException();
                }
                workingFrontEnd.AppendToStream(streamPath, data, offset, byteCount);
            };

            var metadata = CreateMetadata(_smallFilePath, _smallFileContents.Length);
            var tracker  = new TestProgressTracker();
            var uploader = new SingleSegmentUploader(0, metadata, faultyFrontEnd, tracker);
            uploader.UseBackOffRetryStrategy = false;

            if (shouldSucceed)
            {
                uploader.Upload();
                var uploadedBytes = workingFrontEnd.GetStreamContents(StreamPath);
                AssertExtensions.AreEqual(_smallFileContents, uploadedBytes, "Unexpected uploaded stream contents.");
            }
            else
            {
                Assert.Throws <IntentionalException>(() => uploader.Upload());
            }

            VerifyTracker(tracker, shouldSucceed);
        }
Example #9
0
        public void SingleSegmentUploader_UploadSingleBlockStream()
        {
            // A file small enough to fit in a single block must upload verbatim.
            var frontEnd = new InMemoryFrontEnd();
            var metadata = CreateMetadata(_smallFilePath, _smallFileContents.Length);
            var tracker  = new TestProgressTracker();
            var uploader = new SingleSegmentUploader(0, metadata, frontEnd, tracker);
            uploader.UseBackOffRetryStrategy = false;

            uploader.Upload();

            var uploadedBytes = frontEnd.GetStreamContents(StreamPath);
            AssertExtensions.AreEqual(_smallFileContents, uploadedBytes, "Unexpected uploaded stream contents.");
            VerifyTracker(tracker, true);
        }
        public void DataLakeUploader_TargetExistsNoOverwrite()
        {
            // Exercises every resume/overwrite combination, for both upload and
            // download, against a pre-existing target: without overwrite the client
            // must refuse; with overwrite it must proceed.
            var frontEnd = new InMemoryFrontEnd();
            frontEnd.CreateStream(TargetStreamPath, true, null, 0);

            //no resume, no overwrite -> rejected
            var parameters = CreateParameters(filePath: _smallFilePath, isResume: false);
            var client     = new DataLakeStoreTransferClient(parameters, frontEnd);
            Assert.Throws <InvalidOperationException>(() => client.Execute());

            //resume, no overwrite -> rejected
            parameters = CreateParameters(filePath: _smallFilePath, isResume: true);
            client     = new DataLakeStoreTransferClient(parameters, frontEnd);
            Assert.Throws <InvalidOperationException>(() => client.Execute());

            //resume, overwrite -> allowed
            parameters = CreateParameters(filePath: _smallFilePath, isResume: true, isOverwrite: true);
            new DataLakeStoreTransferClient(parameters, frontEnd).Execute();

            //no resume, overwrite -> allowed
            parameters = CreateParameters(filePath: _smallFilePath, isResume: false, isOverwrite: true);
            new DataLakeStoreTransferClient(parameters, frontEnd).Execute();

            //download no resume, no overwrite -> rejected
            parameters = CreateParameters(filePath: TargetStreamPath, targetStreamPath: _downloadFilePath, isResume: false, isDownload: true);
            client     = new DataLakeStoreTransferClient(parameters, frontEnd);
            Assert.Throws <InvalidOperationException>(() => client.Execute());

            //download resume, no overwrite -> rejected
            parameters = CreateParameters(filePath: TargetStreamPath, targetStreamPath: _downloadFilePath, isResume: true, isDownload: true);
            client     = new DataLakeStoreTransferClient(parameters, frontEnd);
            Assert.Throws <InvalidOperationException>(() => client.Execute());

            //download resume, overwrite -> allowed
            parameters = CreateParameters(filePath: TargetStreamPath, targetStreamPath: _downloadFilePath, isResume: true, isOverwrite: true, isDownload: true);
            new DataLakeStoreTransferClient(parameters, frontEnd).Execute();

            //download no resume, overwrite -> allowed
            parameters = CreateParameters(filePath: TargetStreamPath, targetStreamPath: _downloadFilePath, isResume: false, isOverwrite: true, isDownload: true);
            new DataLakeStoreTransferClient(parameters, frontEnd).Execute();
        }
        public void MultipleSegmentUploader_MultipleSegments()
        {
            // A ten-segment transfer on a single thread must leave every target
            // stream complete.
            var frontEnd = new InMemoryFrontEnd();
            var metadata = CreateMetadata(10);

            try
            {
                var uploader = new MultipleSegmentUploader(metadata, 1, frontEnd);
                uploader.UseSegmentBlockBackOffRetryStrategy = false;
                uploader.Upload();
                VerifyTargetStreamsAreComplete(metadata, frontEnd);
            }
            finally
            {
                metadata.DeleteFile();
            }
        }
        /// <summary>
        /// Verifies the file was successfully uploaded.
        /// </summary>
        /// <param name="up">The upload parameters.</param>
        /// <param name="frontEnd">The front end.</param>
        /// <summary>
        /// Verifies the file (or, for recursive transfers, the whole folder) was
        /// successfully transferred.
        /// </summary>
        /// <param name="up">The upload parameters.</param>
        /// <param name="frontEnd">The front end.</param>
        private void VerifyFileUploadedSuccessfully(TransferParameters up, InMemoryFrontEnd frontEnd)
        {
            if (!up.IsRecursive)
            {
                VerifyFileUploadedSuccessfully(up, frontEnd, _largeFileData);
                return;
            }

            // Recursive transfers place each source file under the target stream path.
            var expectedFiles = new Dictionary <string, byte[]>
            {
                { string.Format("{0}/{1}", up.TargetStreamPath, Path.GetFileName(_largeFilePath)), _largeFileData },
                { string.Format("{0}/{1}", up.TargetStreamPath, Path.GetFileName(_smallFilePath)), _smallFileData },
                { string.Format("{0}/{1}", up.TargetStreamPath, Path.GetFileName(_downloadFilePath)), _smallFileData }
            };

            VerifyFileUploadedSuccessfully(expectedFiles, frontEnd, up.IsDownload);
        }
        public void MultipleSegmentUploader_MultipleSegmentsAndMultipleThreads()
        {
            var frontEnd = new InMemoryFrontEnd();
            var metadata = CreateMetadata(10);

            //intentionally use many more threads than segments to verify nothing breaks
            int threadCount = metadata.SegmentCount * 10;

            try
            {
                var uploader = new MultipleSegmentUploader(metadata, threadCount, frontEnd);
                uploader.UseSegmentBlockBackOffRetryStrategy = false;
                uploader.Upload();
                VerifyTargetStreamsAreComplete(metadata, frontEnd);
            }
            finally
            {
                metadata.DeleteFile();
            }
        }
        public void DataLakeUploader_ResumePartialFolderUpload()
        {
            // First pass: the mocked front end only permits one stream to be created,
            // so the recursive upload fails partway. Second pass: resuming against the
            // real front end must complete the remaining files.
            var backingFrontEnd = new InMemoryFrontEnd();
            var faultyFrontEnd  = new MockableFrontEnd(backingFrontEnd);
            int streamsCreated  = 0;

            faultyFrontEnd.CreateStreamImplementation = (path, overwrite, data, byteCount) =>
            {
                if (++streamsCreated > 1)
                {
                    //we only allow 1 file to be created
                    throw new IntentionalException();
                }
                backingFrontEnd.CreateStream(path, overwrite, data, byteCount);
            };

            var parameters = CreateParameters(isResume: false, isRecursive: true);
            var client     = new DataLakeStoreTransferClient(parameters, faultyFrontEnd);
            client.DeleteMetadataFile();

            Assert.Throws <AggregateException>(() => client.Execute());
            Assert.Equal(1, faultyFrontEnd.ListDirectory(parameters.TargetStreamPath, false).Keys.Count);
            Assert.Equal(1, backingFrontEnd.StreamCount);

            //resume the upload but point it to the real back-end, which doesn't throw exceptions
            parameters = CreateParameters(isResume: true, isRecursive: true);
            client     = new DataLakeStoreTransferClient(parameters, backingFrontEnd);

            try
            {
                client.Execute();
            }
            finally
            {
                client.DeleteMetadataFile();
            }

            VerifyFileUploadedSuccessfully(parameters, backingFrontEnd);
        }
Example #15
0
        public void SingleSegmentUploader_UploadFileRange()
        {
            // Upload only the first third of the file and verify exactly that range lands.
            int rangeLength = _smallFileContents.Length / 3;

            var frontEnd = new InMemoryFrontEnd();
            var metadata = CreateMetadata(_smallFilePath, rangeLength);
            var tracker  = new TestProgressTracker();
            var uploader = new SingleSegmentUploader(0, metadata, frontEnd, tracker);
            uploader.UseBackOffRetryStrategy = false;

            uploader.Upload();

            var expectedBytes = new byte[rangeLength];
            Array.Copy(_smallFileContents, 0, expectedBytes, 0, rangeLength);

            var uploadedBytes = frontEnd.GetStreamContents(StreamPath);
            AssertExtensions.AreEqual(expectedBytes, uploadedBytes, "Unexpected uploaded stream contents.");
            VerifyTracker(tracker, true);
        }
Example #16
0
        public void SingleSegmentUploader_TargetStreamExists()
        {
            // An upload against a target stream that already exists (holding unrelated
            // content) must replace it with the source file's contents.
            var fe = new InMemoryFrontEnd();

            //load up an existing stream with some unrelated content
            fe.CreateStream(StreamPath, true, null, 0);
            var data = Encoding.UTF8.GetBytes("random");

            // byte[].Length is already an int; the original (int) cast was redundant
            fe.AppendToStream(StreamPath, data, 0, data.Length);

            //force a re-upload of the stream
            var metadata = CreateMetadata(_smallFilePath, _smallFileContents.Length);
            var ssu      = new SingleSegmentUploader(0, metadata, fe);

            ssu.UseBackOffRetryStrategy = false;
            ssu.Upload();

            //the stream must now hold exactly the uploaded file's contents
            var actualContents = fe.GetStreamContents(StreamPath);

            AssertExtensions.AreEqual(_smallFileContents, actualContents, "Unexpected uploaded stream contents.");
        }
Example #17
0
        public void SingleSegmentUploader_VerifyUploadStreamFails()
        {
            //create a mock front end that swallows all writes and always reports a
            //zero-length stream, so post-upload verification can never succeed
            var workingFrontEnd = new InMemoryFrontEnd();
            var noOpFrontEnd    = new MockableFrontEnd(workingFrontEnd);

            noOpFrontEnd.CreateStreamImplementation    = (streamPath, overwrite, data, byteCount) => { };
            noOpFrontEnd.DeleteStreamImplementation    = (streamPath, recurse, isDownload) => { };
            noOpFrontEnd.StreamExistsImplementation    = (streamPath, isDownload) => true;
            noOpFrontEnd.AppendToStreamImplementation  = (streamPath, data, offset, byteCount) => { };
            noOpFrontEnd.GetStreamLengthImplementation = (streamPath, isDownload) => 0;

            //upload some data
            var metadata = CreateMetadata(_smallFilePath, _smallFileContents.Length);
            var uploader = new SingleSegmentUploader(0, metadata, noOpFrontEnd);
            uploader.UseBackOffRetryStrategy = false;

            //the Upload method should fail if it cannot verify that the stream was uploaded after the upload (i.e., it will get a length of 0 at the end)
            Assert.Throws <TransferFailedException>(() => uploader.Upload());
        }
        public void DataLakeUploader_FreshUploadDownload()
        {
            // Upload a large file while tracking progress, then download it back,
            // letting the client compute a default per-file thread count.
            var frontEnd   = new InMemoryFrontEnd();
            var parameters = CreateParameters(isResume: false);
            TransferProgress progress = null;
            var progressLock = new object();
            IProgress <TransferProgress> progressTracker = new Progress <TransferProgress>(
                (p) =>
            {
                lock (progressLock)
                {
                    //progress reports can race across threads; keep only the one that
                    //is furthest along
                    if (progress == null || progress.TransferredByteCount < p.TransferredByteCount)
                    {
                        progress = p;
                    }
                }
            });
            var client = new DataLakeStoreTransferClient(parameters, frontEnd, progressTracker);

            client.Execute();

            VerifyFileUploadedSuccessfully(parameters, frontEnd);
            VerifyProgressStatus(progress, _largeFileData.Length);

            Assert.Equal(ThreadCount, client.Parameters.PerFileThreadCount);

            // now download
            // reset the thread count to the default (-1) and validate the client
            // replaces it with a computed positive value.
            progress   = null;
            parameters = CreateParameters(isResume: false, isDownload: true, targetStreamPath: _downloadFilePath, isOverwrite: true, filePath: TargetStreamPath);
            parameters.PerFileThreadCount = -1;
            client = new DataLakeStoreTransferClient(parameters, frontEnd, progressTracker);

            client.Execute();

            VerifyFileUploadedSuccessfully(parameters, frontEnd);
            VerifyProgressStatus(progress, _largeFileData.Length);
            Assert.True(parameters.PerFileThreadCount > 0 && client.Parameters.PerFileThreadCount > 0);
        }
        public void DataLakeUploader_ResumePartialUploadDownload()
        {
            //attempt to load the file fully, but only allow creating 1 target stream
            var backingFrontEnd = new InMemoryFrontEnd();
            var frontEnd        = new MockableFrontEnd(backingFrontEnd);

            int createStreamCount = 0;

            frontEnd.CreateStreamImplementation = (path, overwrite, data, byteCount) =>
            {
                createStreamCount++;
                if (createStreamCount > 1)
                {
                    //we only allow 1 file to be created
                    throw new IntentionalException();
                }
                backingFrontEnd.CreateStream(path, overwrite, data, byteCount);
            };

            var up       = CreateParameters(isResume: false);
            var uploader = new DataLakeStoreTransferClient(up, frontEnd);

            uploader.DeleteMetadataFile();

            // the first attempt must fail once the second stream creation is rejected,
            // leaving exactly one stream behind in the backing front end
            Assert.Throws <AggregateException>(() => uploader.Execute());
            Assert.Equal(1, frontEnd.ListDirectory(up.TargetStreamPath, false).Keys.Count);
            Assert.Equal(1, backingFrontEnd.StreamCount);

            //resume the upload but point it to the real back-end, which doesn't throw exceptions
            up       = CreateParameters(isResume: true);
            uploader = new DataLakeStoreTransferClient(up, backingFrontEnd);

            try
            {
                uploader.Execute();
            }
            finally
            {
                // always remove the resume metadata so later runs start clean
                uploader.DeleteMetadataFile();
            }

            VerifyFileUploadedSuccessfully(up, backingFrontEnd);

            // now download the same way.
            var frontEnd2 = new MockableFrontEnd(backingFrontEnd); // need to have data from the successful upload available.

            createStreamCount = 0;
            frontEnd2.ReadStreamImplementation = (path, data, byteCount, isDownload) =>
            {
                createStreamCount++;
                if (createStreamCount > 1)
                {
                    //we only allow 1 file to be created
                    throw new IntentionalException();
                }
                return(backingFrontEnd.ReadStream(path, data, byteCount, isDownload));
            };

            // NOTE: 'up' is read on the right-hand side before being reassigned, so
            // filePath receives the PREVIOUS parameters' TargetStreamPath (the stream
            // that was just uploaded).
            up       = CreateParameters(isResume: false, isDownload: true, targetStreamPath: _downloadFilePath, isOverwrite: true, filePath: up.TargetStreamPath);
            uploader = new DataLakeStoreTransferClient(up, frontEnd2);

            Assert.Throws <AggregateException>(() => uploader.Execute());
            Assert.False(frontEnd2.StreamExists(up.TargetStreamPath), "Target stream should not have been created");

            // now use the good front end
            // (again, 'up.InputFilePath' on the right-hand side is the previous value)
            up       = CreateParameters(isResume: true, isDownload: true, targetStreamPath: _downloadFilePath, isOverwrite: true, filePath: up.InputFilePath);
            uploader = new DataLakeStoreTransferClient(up, backingFrontEnd);

            //resume the download but point it to the real back-end, which doesn't throw exceptions
            try
            {
                uploader.Execute();
            }
            finally
            {
                uploader.DeleteMetadataFile();
            }

            VerifyFileUploadedSuccessfully(up, backingFrontEnd);
        }
        public void DataLakeUploader_ResumeUploadDownloadWithAllMissingFiles()
        {
            //this scenario is achieved by refusing to execute the concat command on the front end for the initial upload (which will interrupt it)
            //and then resuming the upload against a fresh front-end (which obviously has no files there)

            var backingFrontEnd1 = new InMemoryFrontEnd();
            var frontEnd1        = new MockableFrontEnd(backingFrontEnd1);

            frontEnd1.ConcatenateImplementation = (target, inputs, isDownload) => { throw new IntentionalException(); }; //fail the concatenation

            //attempt full upload
            var up = CreateParameters(isResume: false);

            var uploader = new DataLakeStoreTransferClient(up, frontEnd1);

            uploader.DeleteMetadataFile();

            Assert.Throws <IntentionalException>(() => uploader.Execute());
            Assert.False(frontEnd1.StreamExists(up.TargetStreamPath), "Target stream should not have been created");
            Assert.True(0 < backingFrontEnd1.StreamCount, "No temporary streams seem to have been created");

            //attempt to resume the upload
            var frontEnd2 = new InMemoryFrontEnd();

            up       = CreateParameters(isResume: true);
            uploader = new DataLakeStoreTransferClient(up, frontEnd2);

            //at this point the metadata exists locally but there are no target files in frontEnd2
            try
            {
                uploader.Execute();
            }
            finally
            {
                uploader.DeleteMetadataFile();
            }

            VerifyFileUploadedSuccessfully(up, frontEnd2);

            // now download the same way.
            var frontEnd3 = new MockableFrontEnd(frontEnd2);                                                             // need to have data from the successful upload available.

            frontEnd3.ConcatenateImplementation = (target, inputs, isDownload) => { throw new IntentionalException(); }; //fail the concatenation
            up       = CreateParameters(isResume: false, isDownload: true, targetStreamPath: _downloadFilePath, isOverwrite: true, filePath: up.TargetStreamPath);
            uploader = new DataLakeStoreTransferClient(up, frontEnd3);

            Assert.Throws <IntentionalException>(() => uploader.Execute());

            // BUGFIX: this assertion previously queried frontEnd1 (the front end from
            // the initial upload) instead of frontEnd3, the front end the failed
            // download actually ran against (cf. the analogous check in
            // DataLakeUploader_ResumePartialUploadDownload).
            Assert.False(frontEnd3.StreamExists(up.TargetStreamPath, true), "Target stream should not have been created");

            // now use the good front end
            up       = CreateParameters(isResume: true, isDownload: true, targetStreamPath: _downloadFilePath, isOverwrite: true, filePath: up.InputFilePath);
            uploader = new DataLakeStoreTransferClient(up, frontEnd2);
            //at this point the metadata exists locally but there are no target files in frontEnd2
            try
            {
                uploader.Execute();
            }
            finally
            {
                uploader.DeleteMetadataFile();
            }

            VerifyFileUploadedSuccessfully(up, frontEnd2);
        }
        public void DataLakeUploader_FreshFolderUploadDownload()
        {
            // Recursive (folder) upload with progress tracking, followed by two
            // downloads that exercise the client's default thread-count and
            // concurrent-file-count computation.
            var frontEnd = new InMemoryFrontEnd();
            var up       = CreateParameters(isResume: false, isRecursive: true);

            // set the per file thread count to the default to validate computed values.
            up.PerFileThreadCount = -1;
            TransferFolderProgress progress = null;
            var syncRoot = new object();
            IProgress <TransferFolderProgress> progressTracker = new Progress <TransferFolderProgress>(
                (p) =>
            {
                lock (syncRoot)
                {
                    //it is possible that these come out of order because of race conditions (multiple threads reporting at the same time); only update if we are actually making progress
                    if (progress == null || progress.TransferredByteCount < p.TransferredByteCount)
                    {
                        progress = p;
                    }
                }
            });
            var uploader = new DataLakeStoreTransferClient(up, frontEnd, null, progressTracker);

            uploader.Execute();

            // three files are expected: one large file plus two files of small data
            VerifyFileUploadedSuccessfully(up, frontEnd);
            VerifyFolderProgressStatus(progress, _largeFileData.Length + (_smallFileData.Length * 2), 3);

            // verify that per file thread count is different but concurrent is the same.
            Assert.True(up.PerFileThreadCount > 0 && uploader.Parameters.PerFileThreadCount > 0);
            Assert.Equal(2, uploader.Parameters.ConcurrentFileCount);

            // now download
            var downloadFrontEnd = new MockableFrontEnd(frontEnd);

            // replace the isDirectory implementation to return true
            downloadFrontEnd.IsDirectoryImplementation = (streamPath) => { return(true); };
            progress = null;
            up       = CreateParameters(isRecursive: true, isResume: false, isDownload: true, targetStreamPath: Path.GetDirectoryName(_downloadFilePath), isOverwrite: true, filePath: TargetStreamPath);

            // set concurrentFileCount to default and validate that it changed.
            up.ConcurrentFileCount = -1;
            uploader = new DataLakeStoreTransferClient(up, downloadFrontEnd, null, progressTracker);

            uploader.Execute();
            VerifyFileUploadedSuccessfully(up, downloadFrontEnd.BaseAdapter);
            VerifyFolderProgressStatus(progress, _largeFileData.Length + (_smallFileData.Length * 2), 3);

            Assert.True(up.ConcurrentFileCount > 0 && uploader.Parameters.ConcurrentFileCount > 0);
            Assert.Equal(ThreadCount, uploader.Parameters.PerFileThreadCount);

            // run it one more time with both as defaults
            up.PerFileThreadCount  = -1;
            up.ConcurrentFileCount = -1;
            uploader = new DataLakeStoreTransferClient(up, downloadFrontEnd, null, progressTracker);

            uploader.Execute();
            VerifyFileUploadedSuccessfully(up, downloadFrontEnd.BaseAdapter);
            VerifyFolderProgressStatus(progress, _largeFileData.Length + (_smallFileData.Length * 2), 3);
            Assert.True(up.ConcurrentFileCount > 0 && uploader.Parameters.ConcurrentFileCount > 0);
            Assert.True(up.PerFileThreadCount > 0 && uploader.Parameters.PerFileThreadCount > 0);
        }
 /// <summary>
 /// Convenience overload: verifies that a single stream with the given contents
 /// was transferred, delegating to the dictionary-based verification.
 /// </summary>
 /// <param name="up">The transfer parameters describing the target stream.</param>
 /// <param name="frontEnd">The in-memory front end holding the transferred data.</param>
 /// <param name="fileContents">The expected contents of the transferred stream.</param>
 private void VerifyFileUploadedSuccessfully(TransferParameters up, InMemoryFrontEnd frontEnd, byte[] fileContents)
 {
     var expectedStreams = new Dictionary <string, byte[]>
     {
         { up.TargetStreamPath, fileContents }
     };

     VerifyFileUploadedSuccessfully(expectedStreams, frontEnd, up.IsDownload);
 }
        /// <summary>
        /// Drives an upload through a front end that fails a controlled number of times,
        /// then verifies either full success or the expected partial-failure state.
        /// </summary>
        /// <param name="segmentFailCount">The number of segments that should be made to fail.</param>
        private void TestRetry(int segmentFailCount)
        {
            // We only have access to the underlying FrontEnd, so to force one segment to fail
            // we must throw on every buffer-level retry it performs (hence the multiplication
            // by SingleSegmentUploader.MaxBufferUploadAttemptCount). This works because the
            // test file is small enough to fit into a single buffer; a larger file would need
            // more complex orchestration.
            int  simulatedFailures = segmentFailCount * SingleSegmentUploader.MaxBufferUploadAttemptCount;
            bool shouldSucceed     = segmentFailCount < MultipleSegmentUploader.MaxUploadAttemptCount;

            int invocationCount = 0;

            // A mock front end sitting on top of a working one, simulating errors for a while.
            var workingFrontEnd = new InMemoryFrontEnd();
            var fe = new MockableFrontEnd(workingFrontEnd);

            // Shared gate for both Create and Append: throw until the failure budget is spent.
            Action throwIfStillFailing = () =>
            {
                invocationCount++;
                if (invocationCount <= simulatedFailures)
                {
                    throw new IntentionalException();
                }
            };

            fe.CreateStreamImplementation =
                (streamPath, overwrite, data, byteCount) =>
            {
                throwIfStillFailing();
                workingFrontEnd.CreateStream(streamPath, overwrite, data, byteCount);
            };

            fe.AppendToStreamImplementation =
                (streamPath, data, offset, byteCount) =>
            {
                throwIfStillFailing();
                workingFrontEnd.AppendToStream(streamPath, data, offset, byteCount);
            };

            var metadata = CreateMetadata(1);

            try
            {
                var msu = new MultipleSegmentUploader(metadata, 1, fe)
                {
                    UseSegmentBlockBackOffRetryStrategy = false
                };

                if (shouldSucceed)
                {
                    // The retries should absorb all simulated failures, so Upload must not throw.
                    msu.Upload();

                    // On success both the metadata and the target streams must be complete.
                    VerifyTargetStreamsAreComplete(metadata, workingFrontEnd);
                }
                else
                {
                    // Too many failures: Upload must surface them as an AggregateException.
                    Assert.Throws <AggregateException>(() => { msu.Upload(); });

                    // At least one segment must have been marked Failed...
                    Assert.True(metadata.Segments.Any(s => s.Status == SegmentTransferStatus.Failed), "Could not find any failed segments");

                    // ...and every other segment must still have completed correctly.
                    foreach (var segment in metadata.Segments.Where(s => s.Status != SegmentTransferStatus.Failed))
                    {
                        VerifyTargetStreamIsComplete(segment, metadata, workingFrontEnd);
                    }
                }
            }
            finally
            {
                metadata.DeleteFile();
            }
        }
        /// <summary>
        /// Verifies that exactly the expected streams exist on the front end and that
        /// each stream's contents are byte-identical to the expected data.
        /// </summary>
        /// <param name="targetPathsAndData">Expected stream paths mapped to their expected contents.</param>
        /// <param name="frontEnd">The in-memory front end to inspect.</param>
        /// <param name="isDownload">Whether the transfer was a download.</param>
        private void VerifyFileUploadedSuccessfully(Dictionary <string, byte[]> targetPathsAndData, InMemoryFrontEnd frontEnd, bool isDownload)
        {
            // The front end must contain exactly the expected number of streams — no extras.
            Assert.Equal(targetPathsAndData.Count, frontEnd.StreamCount);

            foreach (var entry in targetPathsAndData)
            {
                var path         = entry.Key;
                var expectedData = entry.Value;

                Assert.True(frontEnd.StreamExists(path, isDownload), "Uploaded stream does not exist");
                Assert.Equal(expectedData.Length, frontEnd.GetStreamLength(path, isDownload));

                var uploadedData = frontEnd.GetStreamContents(path, isDownload);
                AssertExtensions.AreEqual(expectedData, uploadedData, "Uploaded stream is not binary identical to input file");
            }
        }
        /// <summary>
        /// Asserts that a single segment finished (status Complete), that its stream exists
        /// on the front end with the expected length, and that it holds exactly the expected bytes.
        /// </summary>
        /// <param name="segmentMetadata">The metadata of the segment to check.</param>
        /// <param name="metadata">The overall transfer metadata the segment belongs to.</param>
        /// <param name="frontEnd">The in-memory front end holding the segment stream.</param>
        private void VerifyTargetStreamIsComplete(TransferSegmentMetadata segmentMetadata, TransferMetadata metadata, InMemoryFrontEnd frontEnd)
        {
            // The segment must be marked complete and its stream must exist with the right length.
            Assert.Equal(SegmentTransferStatus.Complete, segmentMetadata.Status);
            Assert.True(frontEnd.StreamExists(segmentMetadata.Path), string.Format("Segment {0} was not uploaded", segmentMetadata.SegmentNumber));
            Assert.Equal(segmentMetadata.Length, frontEnd.GetStreamLength(segmentMetadata.Path));

            // The uploaded bytes must match the slice of the source this segment covers.
            var uploadedContents = frontEnd.GetStreamContents(segmentMetadata.Path);
            var expectedContents = GetExpectedContents(segmentMetadata, metadata);

            AssertExtensions.AreEqual(expectedContents, uploadedContents, "Segment {0} has unexpected contents", segmentMetadata.SegmentNumber);
        }