/// <summary>
/// Copies a single local file up to the specified Data Lake Store account.
/// </summary>
/// <param name="destinationPath">Data Lake Store path to write to.</param>
/// <param name="accountName">Data Lake Store account name.</param>
/// <param name="sourcePath">Local file to upload.</param>
/// <param name="cmdletCancellationToken">Token used to cancel the upload.</param>
/// <param name="threadCount">Upload thread count; values below 1 default to the processor count.</param>
/// <param name="overwrite">If false, throws when the destination already exists.</param>
/// <param name="resume">If true, resumes a previously interrupted upload.</param>
/// <param name="isBinary">If true, uploads without record-boundary alignment.</param>
/// <param name="cmdletRunningRequest">Optional cmdlet used to surface progress.</param>
/// <param name="parentProgress">Optional parent progress record to nest under.</param>
/// <exception cref="InvalidOperationException">The destination exists and <paramref name="overwrite"/> is false.</exception>
public void CopyFile(string destinationPath, string accountName, string sourcePath, CancellationToken cmdletCancellationToken, int threadCount = -1, bool overwrite = false, bool resume = false, bool isBinary = false, Cmdlet cmdletRunningRequest = null, ProgressRecord parentProgress = null)
{
    FileType ignoredType;
    if (!overwrite && TestFileOrFolderExistence(destinationPath, accountName, out ignoredType))
    {
        throw new InvalidOperationException(string.Format(Properties.Resources.LocalFileAlreadyExists, destinationPath));
    }

    //TODO: defect: 4259238 (located here: http://vstfrd:8080/Azure/RD/_workitems/edit/4259238) needs to be resolved or the tracingadapter work around needs to be put back in
    // default the number of threads to use to the processor count
    if (threadCount < 1)
    {
        threadCount = Environment.ProcessorCount;
    }

    // Progress bar indicator.
    var description = string.Format("Copying File: {0} to DataLakeStore Location: {1} for account: {2}", sourcePath, destinationPath, accountName);
    var progress = new ProgressRecord(
        uniqueActivityIdGenerator.Next(0, 10000000),
        "Upload to DataLakeStore Store",
        description)
    {
        PercentComplete = 0
    };

    if (parentProgress != null)
    {
        progress.ParentActivityId = parentProgress.ActivityId;
    }

    // On update from the Data Lake store uploader, capture the progress.
    var progressTracker = new System.Progress<UploadProgress>();
    progressTracker.ProgressChanged += (s, e) =>
    {
        lock (ConsoleOutputLock)
        {
            var toSet = (int)(1.0 * e.UploadedByteCount / e.TotalFileLength * 100);
            // powershell defect protection: ProgressRecord.PercentComplete only
            // accepts 0-100; if a tracking defect produces a value outside that
            // range, leave the previous value unchanged instead of crashing.
            // (Same guard as the other Copy* overloads in this file.)
            if (toSet >= 0 && toSet <= 100)
            {
                progress.PercentComplete = toSet;
            }
        }
    };

    var uploadParameters = new UploadParameters(sourcePath, destinationPath, accountName, threadCount, overwrite, resume, isBinary);
    var uploader = new DataLakeStoreUploader(uploadParameters,
        new DataLakeStoreFrontEndAdapter(accountName, _client, cmdletCancellationToken),
        cmdletCancellationToken,
        progressTracker);

    var previousExpect100 = ServicePointManager.Expect100Continue;
    try
    {
        // Suppress Expect: 100-continue handshakes for upload throughput; restored in finally.
        ServicePointManager.Expect100Continue = false;

        // Execute the uploader.
        var uploadTask = Task.Run(() =>
        {
            cmdletCancellationToken.ThrowIfCancellationRequested();
            uploader.Execute();
            cmdletCancellationToken.ThrowIfCancellationRequested();
        }, cmdletCancellationToken);

        TrackUploadProgress(uploadTask, progress, cmdletRunningRequest, cmdletCancellationToken);
    }
    finally
    {
        ServicePointManager.Expect100Continue = previousExpect100;
    }
}
/// <summary>
/// Test helper: uploads a file or folder to a Data Lake Store account, optionally
/// cancelling mid-upload (after ~60 seconds) to exercise cancellation handling.
/// Always returns true; failures during the cancellation path are deliberately
/// swallowed so they can be inspected under a debugger.
/// </summary>
/// <param name="dataLakeStoreFileSystemClient">File-system client used by the front-end adapter.</param>
/// <param name="dlAccountName">Data Lake Store account name.</param>
/// <param name="srcPath">Local source path.</param>
/// <param name="destPath">Data Lake Store destination path.</param>
/// <param name="force">Overwrite the destination if it exists.</param>
/// <param name="recursive">Upload folder contents recursively.</param>
/// <param name="testCancel">If true, runs the upload on a task and cancels it after one 60-second sleep.</param>
/// <returns>Always true.</returns>
public static bool UploadFile(DataLakeStoreFileSystemManagementClient dataLakeStoreFileSystemClient, string dlAccountName, string srcPath, string destPath, bool force = false, bool recursive = false, bool testCancel = false)
{
    var cancelSource = new CancellationTokenSource();
    var myToken = cancelSource.Token;
    var parameters = new UploadParameters(srcPath, destPath, dlAccountName, isOverwrite: force, isBinary: true, perFileThreadCount: 40, concurrentFileCount: 100, isRecursive: recursive);
    var progressTracker = new System.Progress<UploadFolderProgress>();
    progressTracker.ProgressChanged += (s, e) =>
    {
        if (e.TotalFileCount == 0)
        {
            Console.WriteLine("we are done!");
        }
    };
    var frontend = new DataLakeStoreFrontEndAdapter(dlAccountName, dataLakeStoreFileSystemClient, myToken);
    var uploader = new DataLakeStoreUploader(parameters, frontend, myToken, folderProgressTracker: progressTracker);
    if (testCancel)
    {
        var uploadTask = Task.Run(() =>
        {
            myToken.ThrowIfCancellationRequested();
            uploader.Execute();
            myToken.ThrowIfCancellationRequested();
        }, myToken);
        try
        {
            while (!uploadTask.IsCompleted && !uploadTask.IsCanceled)
            {
                if (myToken.IsCancellationRequested)
                {
                    // we are done tracking progress and will just break and let the task clean itself up.
                    try
                    {
                        uploadTask.Wait();
                    }
                    catch (OperationCanceledException)
                    {
                        if (uploadTask.IsCanceled)
                        {
                            uploadTask.Dispose();
                        }
                    }
                    catch (AggregateException ex)
                    {
                        // Wait() wraps task faults; cancellation may surface as an inner
                        // OperationCanceledException. Anything else is unexpected: rethrow.
                        if (ex.InnerExceptions.OfType<OperationCanceledException>().Any())
                        {
                            if (uploadTask.IsCanceled)
                            {
                                uploadTask.Dispose();
                            }
                        }
                        else
                        {
                            throw;
                        }
                    }
                    catch (Exception)
                    {
                        // Deliberately swallowed (test-only best effort): set a breakpoint
                        // here to see what surfaced during debugging.
                    }
                    break;
                }
                Thread.Sleep(60000); // run for 60 seconds and then cancel out and see what happens
                cancelSource.Cancel();
            }
        }
        catch (OperationCanceledException)
        {
            // do nothing since we successfully cancelled out
        }
        catch (Exception)
        {
            // Deliberately swallowed (test-only best effort): break here under a
            // debugger to see what is going on.
        }
    }
    else
    {
        uploader.Execute();
    }
    return true;
}
/// <summary>
/// Copies a local folder (optionally recursively) up to the specified Data Lake
/// Store account, reporting progress through a PowerShell progress record.
/// </summary>
/// <param name="destinationFolderPath">Data Lake Store folder to write to.</param>
/// <param name="accountName">Data Lake Store account name.</param>
/// <param name="sourceFolderPath">Local folder to upload.</param>
/// <param name="cmdletCancellationToken">Token used to cancel the upload.</param>
/// <param name="folderThreadCount">Concurrent file count; values &lt;= 0 default to the processor count.</param>
/// <param name="perFileThreadCount">Threads per file; values &lt;= 0 default to the processor count.</param>
/// <param name="recursive">Include subfolders.</param>
/// <param name="overwrite">Overwrite existing destination files.</param>
/// <param name="resume">Resume a previously interrupted upload.</param>
/// <param name="forceBinaryOrText">Reserved; not used by this overload.</param>
/// <param name="isBinary">Upload without record-boundary alignment.</param>
/// <param name="cmdletRunningRequest">Optional cmdlet used to surface progress.</param>
/// <exception cref="CloudException">Wraps any failure raised during the upload.</exception>
public void CopyDirectory(
    string destinationFolderPath,
    string accountName,
    string sourceFolderPath,
    CancellationToken cmdletCancellationToken,
    int folderThreadCount = -1,
    int perFileThreadCount = -1,
    bool recursive = false,
    bool overwrite = false,
    bool resume = false,
    bool forceBinaryOrText = false,
    bool isBinary = false,
    Cmdlet cmdletRunningRequest = null)
{
    var totalBytes = GetByteCountInDirectory(sourceFolderPath, recursive);
    var totalFiles = GetFileCountInDirectory(sourceFolderPath, recursive);
    var progress = new ProgressRecord(
        uniqueActivityIdGenerator.Next(0, 10000000),
        string.Format("Copying Folder: {0}{1}. Total bytes remaining: {2}. Total files remaining: {3}",
            sourceFolderPath, recursive ? " recursively" : string.Empty, totalBytes, totalFiles),
        "Copy in progress...")
    {
        PercentComplete = 0
    };

    UpdateProgress(progress, cmdletRunningRequest);
    var internalFolderThreads = folderThreadCount <= 0 ? Environment.ProcessorCount : folderThreadCount;
    var internalFileThreads = perFileThreadCount <= 0 ? Environment.ProcessorCount : perFileThreadCount;

    // we need to override the default .NET value for max connections to a host to our number of threads, if necessary (otherwise we won't achieve the parallelism we want)
    var previousDefaultConnectionLimit = ServicePointManager.DefaultConnectionLimit;
    var previousExpect100 = ServicePointManager.Expect100Continue;
    try
    {
        ServicePointManager.DefaultConnectionLimit =
            Math.Max((internalFolderThreads * internalFileThreads) + internalFolderThreads,
                ServicePointManager.DefaultConnectionLimit);
        ServicePointManager.Expect100Continue = false;

        // On update from the Data Lake store uploader, capture the progress.
        var progressTracker = new System.Progress<UploadFolderProgress>();
        progressTracker.ProgressChanged += (s, e) =>
        {
            lock (ConsoleOutputLock)
            {
                var toSet = (int)(1.0 * e.UploadedByteCount / e.TotalFileLength * 100);
                // powershell defect protection: ProgressRecord.PercentComplete only
                // accepts 0-100; if a tracking defect produces a value outside that
                // range, leave the previous value unchanged instead of crashing.
                // (Same guard as the other Copy* overloads in this file.)
                if (toSet >= 0 && toSet <= 100)
                {
                    progress.PercentComplete = toSet;
                }

                progress.Activity = string.Format("Copying Folder: {0}{1}. Total bytes remaining: {2}. Total files remaining: {3}",
                    sourceFolderPath, recursive ? " recursively" : string.Empty,
                    e.TotalFileLength - e.UploadedByteCount, e.TotalFileCount - e.UploadedFileCount);
            }
        };

        var uploadParameters = new UploadParameters(sourceFolderPath, destinationFolderPath, accountName,
            internalFileThreads, internalFolderThreads,
            isOverwrite: overwrite, isResume: resume, isBinary: isBinary, isRecursive: recursive);
        var uploader = new DataLakeStoreUploader(uploadParameters,
            new DataLakeStoreFrontEndAdapter(accountName, _client, cmdletCancellationToken),
            cmdletCancellationToken,
            folderProgressTracker: progressTracker);

        // Execute the uploader.
        var uploadTask = Task.Run(() =>
        {
            cmdletCancellationToken.ThrowIfCancellationRequested();
            uploader.Execute();
            cmdletCancellationToken.ThrowIfCancellationRequested();
        }, cmdletCancellationToken);

        TrackUploadProgress(uploadTask, progress, cmdletRunningRequest, cmdletCancellationToken);

        if (!cmdletCancellationToken.IsCancellationRequested)
        {
            progress.PercentComplete = 100;
            progress.RecordType = ProgressRecordType.Completed;
            UpdateProgress(progress, cmdletRunningRequest);
        }
    }
    catch (Exception e)
    {
        throw new CloudException(string.Format(Properties.Resources.UploadFailedMessage, e));
    }
    finally
    {
        // Restore process-wide ServicePointManager state for other callers.
        ServicePointManager.DefaultConnectionLimit = previousDefaultConnectionLimit;
        ServicePointManager.Expect100Continue = previousExpect100;
    }
}
/// <summary>
/// Copies a single file between the local machine and a Data Lake Store account,
/// in either direction, reporting progress through a PowerShell progress record.
/// </summary>
/// <param name="destinationPath">Destination path (local when downloading, Data Lake Store otherwise).</param>
/// <param name="accountName">Data Lake Store account name.</param>
/// <param name="sourcePath">Source path (Data Lake Store when downloading, local otherwise).</param>
/// <param name="cmdletCancellationToken">Token used to cancel the transfer.</param>
/// <param name="threadCount">Transfer thread count; values below 1 fall back to the documented default of 10.</param>
/// <param name="overwrite">If false, throws when the destination already exists.</param>
/// <param name="resume">Resume a previously interrupted transfer.</param>
/// <param name="isBinary">Transfer without record-boundary alignment.</param>
/// <param name="isDownload">True to download from the account, false to upload to it.</param>
/// <param name="cmdletRunningRequest">Optional cmdlet used to surface progress.</param>
/// <param name="parentProgress">Optional parent progress record to nest under.</param>
/// <exception cref="InvalidOperationException">The destination exists and <paramref name="overwrite"/> is false.</exception>
/// <exception cref="CloudException">Wraps any failure raised during the transfer.</exception>
public void CopyFile(string destinationPath, string accountName, string sourcePath, CancellationToken cmdletCancellationToken, int threadCount = 10, bool overwrite = false, bool resume = false, bool isBinary = false, bool isDownload = false, Cmdlet cmdletRunningRequest = null, ProgressRecord parentProgress = null)
{
    var previousTracing = ServiceClientTracing.IsEnabled;
    try
    {
        // disable this due to performance issues during download until issue: https://github.com/Azure/azure-powershell/issues/2499 is resolved.
        ServiceClientTracing.IsEnabled = false;

        // Without overwrite, refuse to clobber an existing destination. Downloads
        // check the local disk; uploads probe the Data Lake Store account.
        if (!overwrite)
        {
            FileType unusedType;
            bool destinationExists = isDownload
                ? File.Exists(destinationPath)
                : TestFileOrFolderExistence(destinationPath, accountName, out unusedType);
            if (destinationExists)
            {
                throw new InvalidOperationException(string.Format(Properties.Resources.LocalFileAlreadyExists, destinationPath));
            }
        }

        if (threadCount < 1)
        {
            threadCount = 10; // 10 is the default per our documentation.
        }

        // Progress bar indicator.
        var description = string.Format("Copying {0} File: {1} {2} Location: {3} for account: {4}",
            isDownload ? "Data Lake Store" : "Local",
            sourcePath,
            isDownload ? "to local" : "to Data Lake Store",
            destinationPath,
            accountName);
        var activity = string.Format("{0} Data Lake Store Store", isDownload ? "Download from" : "Upload to");
        var progress = new ProgressRecord(uniqueActivityIdGenerator.Next(0, 10000000), activity, description)
        {
            PercentComplete = 0
        };

        if (parentProgress != null)
        {
            progress.ParentActivityId = parentProgress.ActivityId;
        }

        // On update from the Data Lake store uploader, capture the progress.
        var progressTracker = new System.Progress<UploadProgress>();
        progressTracker.ProgressChanged += (sender, info) =>
        {
            lock (ConsoleOutputLock)
            {
                var percent = (int)(1.0 * info.UploadedByteCount / info.TotalFileLength * 100);
                // powershell defect protection. If, through some defect in our
                // progress tracking, the number is outside of 0 - 100, powershell
                // will crash if it is set to that value. Instead just keep the
                // value unchanged in that case.
                if (percent >= 0 && percent <= 100)
                {
                    progress.PercentComplete = percent;
                }
            }
        };

        var uploadParameters = new UploadParameters(sourcePath, destinationPath, accountName, threadCount,
            isOverwrite: overwrite, isResume: resume, isBinary: isBinary, isDownload: isDownload);
        var uploader = new DataLakeStoreUploader(uploadParameters,
            new DataLakeStoreFrontEndAdapter(accountName, _client, cmdletCancellationToken),
            cmdletCancellationToken,
            progressTracker);

        var previousExpect100 = ServicePointManager.Expect100Continue;
        try
        {
            ServicePointManager.Expect100Continue = false;

            // Execute the uploader.
            var uploadTask = Task.Run(() =>
            {
                cmdletCancellationToken.ThrowIfCancellationRequested();
                uploader.Execute();
                cmdletCancellationToken.ThrowIfCancellationRequested();
            }, cmdletCancellationToken);

            TrackUploadProgress(uploadTask, progress, cmdletRunningRequest, cmdletCancellationToken);
        }
        catch (Exception e)
        {
            throw new CloudException(string.Format(Properties.Resources.UploadFailedMessage, e));
        }
        finally
        {
            ServicePointManager.Expect100Continue = previousExpect100;
        }
    }
    finally
    {
        ServiceClientTracing.IsEnabled = previousTracing;
    }
}
/// <summary>
/// Copies a folder (optionally recursively) between the local machine and a Data
/// Lake Store account, in either direction, reporting progress through a
/// PowerShell progress record.
/// </summary>
/// <param name="destinationFolderPath">Destination folder (local when downloading, Data Lake Store otherwise).</param>
/// <param name="accountName">Data Lake Store account name.</param>
/// <param name="sourceFolderPath">Source folder (Data Lake Store when downloading, local otherwise).</param>
/// <param name="cmdletCancellationToken">Token used to cancel the transfer.</param>
/// <param name="concurrentFileCount">Files transferred in parallel; values &lt;= 0 fall back to 5.</param>
/// <param name="perFileThreadCount">Threads per file; values &lt;= 0 fall back to 10.</param>
/// <param name="recursive">Include subfolders.</param>
/// <param name="overwrite">Overwrite existing destination files.</param>
/// <param name="resume">Resume a previously interrupted transfer.</param>
/// <param name="forceBinaryOrText">Reserved; not used by this overload.</param>
/// <param name="isBinary">Transfer without record-boundary alignment.</param>
/// <param name="isDownload">True to download from the account, false to upload to it.</param>
/// <param name="cmdletRunningRequest">Optional cmdlet used to surface progress.</param>
/// <exception cref="CloudException">Wraps any failure raised during the transfer.</exception>
public void CopyDirectory(
    string destinationFolderPath,
    string accountName,
    string sourceFolderPath,
    CancellationToken cmdletCancellationToken,
    int concurrentFileCount = 5,
    int perFileThreadCount = 10,
    bool recursive = false,
    bool overwrite = false,
    bool resume = false,
    bool forceBinaryOrText = false,
    bool isBinary = false,
    bool isDownload = false,
    Cmdlet cmdletRunningRequest = null)
{
    // Pre-compute totals so the initial progress record can show remaining work.
    var totalBytes = GetByteCountInDirectory(sourceFolderPath, recursive, isDownload, accountName);
    var totalFiles = GetFileCountInDirectory(sourceFolderPath, recursive, isDownload, accountName);
    var progress = new ProgressRecord(
        uniqueActivityIdGenerator.Next(0, 10000000),
        string.Format("Copying Folder: {0}{1}. Total bytes remaining: {2}. Total files remaining: {3}",
            sourceFolderPath, recursive ? " recursively" : string.Empty, totalBytes, totalFiles),
        "Copy in progress...")
    {
        PercentComplete = 0
    };

    UpdateProgress(progress, cmdletRunningRequest);
    // Fall back to the documented defaults (5 concurrent files, 10 threads per file)
    // when the caller passes a non-positive count.
    var internalFolderThreads = concurrentFileCount <= 0 ? 5 : concurrentFileCount;
    var internalFileThreads = perFileThreadCount <= 0 ? 10 : perFileThreadCount;

    // we need to override the default .NET value for max connections to a host to our number of threads, if necessary (otherwise we won't achieve the parallelism we want)
    // Snapshot process-wide static state so the finally block can restore it.
    var previousDefaultConnectionLimit = ServicePointManager.DefaultConnectionLimit;
    var previousExpect100 = ServicePointManager.Expect100Continue;
    var previousTracing = ServiceClientTracing.IsEnabled;
    try
    {
        // disable this due to performance issues during download until issue: https://github.com/Azure/azure-powershell/issues/2499 is resolved.
        ServiceClientTracing.IsEnabled = false;
        ServicePointManager.DefaultConnectionLimit =
            Math.Max((internalFolderThreads * internalFileThreads) + internalFolderThreads,
                ServicePointManager.DefaultConnectionLimit);
        ServicePointManager.Expect100Continue = false;

        // On update from the Data Lake store uploader, capture the progress.
        var progressTracker = new System.Progress<UploadFolderProgress>();
        progressTracker.ProgressChanged += (s, e) =>
        {
            lock (ConsoleOutputLock)
            {
                var toSet = (int)(1.0 * e.UploadedByteCount / e.TotalFileLength * 100);
                // powershell defect protection. If, through some defect in
                // our progress tracking, the number is outside of 0 - 100,
                // powershell will crash if it is set to that value. Instead
                // just keep the value unchanged in that case.
                if (toSet < 0 || toSet > 100)
                {
                    progress.PercentComplete = progress.PercentComplete;
                }
                else
                {
                    progress.PercentComplete = toSet;
                }

                // Refresh the activity line with the remaining byte and file counts.
                progress.Activity = string.Format("Copying Folder: {0}{1}. Total bytes remaining: {2}. Total files remaining: {3}",
                    sourceFolderPath, recursive ? " recursively" : string.Empty,
                    e.TotalFileLength - e.UploadedByteCount, e.TotalFileCount - e.UploadedFileCount);
            }
        };

        var uploadParameters = new UploadParameters(sourceFolderPath, destinationFolderPath, accountName,
            internalFileThreads, internalFolderThreads,
            isOverwrite: overwrite, isResume: resume, isBinary: isBinary, isRecursive: recursive, isDownload: isDownload);
        var uploader = new DataLakeStoreUploader(uploadParameters,
            new DataLakeStoreFrontEndAdapter(accountName, _client, cmdletCancellationToken),
            cmdletCancellationToken,
            folderProgressTracker: progressTracker);

        // Execute the uploader.
        var uploadTask = Task.Run(() =>
        {
            cmdletCancellationToken.ThrowIfCancellationRequested();
            uploader.Execute();
            cmdletCancellationToken.ThrowIfCancellationRequested();
        }, cmdletCancellationToken);

        TrackUploadProgress(uploadTask, progress, cmdletRunningRequest, cmdletCancellationToken);

        // Only mark the record completed when the transfer was not cancelled.
        if (!cmdletCancellationToken.IsCancellationRequested)
        {
            progress.PercentComplete = 100;
            progress.RecordType = ProgressRecordType.Completed;
            UpdateProgress(progress, cmdletRunningRequest);
        }
    }
    catch (Exception e)
    {
        throw new CloudException(string.Format(Properties.Resources.UploadFailedMessage, e));
    }
    finally
    {
        // Restore the process-wide tracing and connection settings for other callers.
        ServiceClientTracing.IsEnabled = previousTracing;
        ServicePointManager.DefaultConnectionLimit = previousDefaultConnectionLimit;
        ServicePointManager.Expect100Continue = previousExpect100;
    }
}
/// <summary>
/// Copies a single local file up to the specified Data Lake Store account with
/// client tracing temporarily disabled (workaround for defect 4259238).
/// </summary>
/// <param name="destinationPath">Data Lake Store path to write to.</param>
/// <param name="accountName">Data Lake Store account name.</param>
/// <param name="sourcePath">Local file to upload.</param>
/// <param name="cmdletCancellationToken">Token used to cancel the upload.</param>
/// <param name="threadCount">Upload thread count; values below 1 default to the processor count.</param>
/// <param name="overwrite">Overwrite the destination if it exists.</param>
/// <param name="resume">Resume a previously interrupted upload.</param>
/// <param name="isBinary">Upload without record-boundary alignment.</param>
/// <param name="cmdletRunningRequest">Optional cmdlet used to surface progress.</param>
/// <param name="parentProgress">Optional parent progress record to nest under.</param>
public void CopyFile(string destinationPath, string accountName, string sourcePath, CancellationToken cmdletCancellationToken, int threadCount = -1, bool overwrite = false, bool resume = false, bool isBinary = false, Cmdlet cmdletRunningRequest = null, ProgressRecord parentProgress = null)
{
    var originalValue = TracingAdapter.IsEnabled;
    try
    {
        //TODO: Remove this logic when defect: 4259238 (located here: http://vstfrd:8080/Azure/RD/_workitems/edit/4259238) is resolved
        TracingAdapter.IsEnabled = false;

        // default the number of threads to use to the processor count
        if (threadCount < 1)
        {
            threadCount = Environment.ProcessorCount;
        }

        // Progress bar indicator.
        var description = string.Format("Copying File: {0} to DataLakeStore Location: {1} for account: {2}", sourcePath, destinationPath, accountName);
        var progress = new ProgressRecord(
            uniqueActivityIdGenerator.Next(0, 10000000),
            "Upload to DataLakeStore Store",
            description)
        {
            PercentComplete = 0
        };

        if (parentProgress != null)
        {
            progress.ParentActivityId = parentProgress.ActivityId;
        }

        // On update from the Data Lake store uploader, capture the progress.
        var progressTracker = new System.Progress<UploadProgress>();
        progressTracker.ProgressChanged += (s, e) =>
        {
            lock (ConsoleOutputLock)
            {
                var toSet = (int)(1.0 * e.UploadedByteCount / e.TotalFileLength * 100);
                // powershell defect protection: ProgressRecord.PercentComplete only
                // accepts 0-100; if a tracking defect produces a value outside that
                // range, leave the previous value unchanged instead of crashing.
                // (Same guard as the other Copy* overloads in this file.)
                if (toSet >= 0 && toSet <= 100)
                {
                    progress.PercentComplete = toSet;
                }
            }
        };

        var uploadParameters = new UploadParameters(sourcePath, destinationPath, accountName, threadCount, overwrite, resume, isBinary);
        var uploader = new DataLakeStoreUploader(uploadParameters,
            new DataLakeStoreFrontEndAdapter(accountName, _client, cmdletCancellationToken),
            cmdletCancellationToken,
            progressTracker);

        // Execute the uploader.
        var uploadTask = Task.Run(() =>
        {
            cmdletCancellationToken.ThrowIfCancellationRequested();
            uploader.Execute();
            cmdletCancellationToken.ThrowIfCancellationRequested();
        }, cmdletCancellationToken);

        TrackUploadProgress(uploadTask, progress, cmdletRunningRequest, cmdletCancellationToken);
    }
    finally
    {
        TracingAdapter.IsEnabled = originalValue;
    }
}