Example #1
0
        public static void JobError(string jobName, string message, ProgressRecorder progressRecorder, List <TransferDetail> failedFiles, List <TransferDetail> skippedFiles)
        {
            // Record the failing job, the error text, and the accumulated transfer totals.
            log.WriteLine($"Error for Job: {jobName}");
            log.WriteLine($"Error: {message}");
            log.WriteLine("WebJobs will make 5 attempts to rerun and complete");
            log.WriteLine(progressRecorder.ToString());

            // Per-file detail for every transfer that failed outright.
            if (failedFiles.Count != 0)
            {
                log.WriteLine("Detailed File Transfer Errors");
                foreach (var failed in failedFiles)
                {
                    log.WriteLine($"Source File: {failed.Source}");
                    log.WriteLine($"Destination File: {failed.Destination}");
                    log.WriteLine($"Error Message: {failed.Error}");
                }
            }

            // Per-file detail for every transfer that was skipped.
            if (skippedFiles.Count != 0)
            {
                log.WriteLine("Skipped File Details");
                foreach (var skipped in skippedFiles)
                {
                    log.WriteLine($"Source File: {skipped.Source}");
                    log.WriteLine($"Destination File: {skipped.Destination}");
                    log.WriteLine($"Error Message: {skipped.Error}");
                }
            }
        }
Example #2
0
        public static void JobComplete(string jobName, ProgressRecorder progressRecorder, List <TransferDetail> skippedFiles)
        {
            // Record job completion together with the accumulated transfer totals.
            log.WriteLine($"Job Complete: {jobName}");
            log.WriteLine(progressRecorder.ToString());

            // Per-file detail for every transfer that was skipped.
            if (skippedFiles.Count != 0)
            {
                log.WriteLine("Skipped File Details");
                foreach (var skipped in skippedFiles)
                {
                    log.WriteLine($"Source File: {skipped.Source}");
                    log.WriteLine($"Destination File: {skipped.Destination}");
                    log.WriteLine($"Error Message: {skipped.Error}");
                }
            }
        }
Example #3
0
        // This function will get triggered/executed when a new message is written on the Azure WebJobs Queue called backupqueue
        // This version uses CopyDirectoryAsync in DML 0.1. Blobs are copied in parallel using ForEachAsync()
        // This function will get triggered/executed when a new message is written on the Azure WebJobs Queue called backupqueue.
        // This version uses DML 0.1: blobs are listed in segments and copied in parallel using ForEachAsync().
        // On failure the exception is logged and RETHROWN so the WebJobs runtime keeps the queue
        // message alive and performs its retry attempts (see JobError's "5 attempts" note).
        public async static Task ProcessQueueMessage([QueueTrigger("backupqueue")] CopyItem copyItem, TextWriter log, CancellationToken cancelToken)
        {
            _log = log;
            await log.WriteLineAsync("Job Start: " + copyItem.JobName);

            // This class accumulates transfer data during the process
            ProgressRecorder progressRecorder = new ProgressRecorder();

            try
            {
                // OpContext for pre-copy retries on Azure Storage
                // DML has its own context object and retry
                OperationContext opContext = new OperationContext();
                opContext.Retrying += StorageRequest_Retrying;

                // Define Blob Request Options
                BlobRequestOptions blobRequestOptions = new BlobRequestOptions
                {
                    // Defined Exponential Retry Policy
                    RetryPolicy = _retryPolicy
                };

                // The default number of parallel tasks in DML = # of Processors * 8
                // Set that as our max limit of parallel tasks to that amount since more gives us no additional performance
                int parallelTasks = Convert.ToInt32(ConfigurationManager.AppSettings["ParallelTasks"]);

                // Set the number of http connections to # of Processors * 8
                ServicePointManager.DefaultConnectionLimit = Environment.ProcessorCount * 8;

                // Save additional request round trip. We are not chunking and
                // uploading large amounts of data where we'd send 100's so set to false
                ServicePointManager.Expect100Continue = false;

                // CancellationTokenSource used to cancel the transfer
                CancellationTokenSource cancellationTokenSource = new CancellationTokenSource();

                // Represents a checkpoint from which a transfer may be resumed and continue
                // The checkpoint gets set on each call to CopyBlobAsync(). This allows the WebJob
                // to fail then pick it right up and continue to copy blobs, completing the copy job
                TransferCheckpoint transferCheckpoint = null;

                // Open connections to both storage accounts
                CloudStorageAccount sourceAccount      = GetAccount(copyItem.SourceAccountToken);
                CloudStorageAccount destinationAccount = GetAccount(copyItem.DestinationAccountToken);

                // Context object for the transfer, provides additional runtime information about its execution
                TransferContext transferContext = new TransferContext
                {
                    // Pipe transfer progress data to ProgressRecorder
                    ProgressHandler = progressRecorder,

                    // Callback to overwrite destination if it exists
                    OverwriteCallback = (source, destination) =>
                    {
                        return(OverwriteFile(source, destination, sourceAccount, destinationAccount, copyItem, blobRequestOptions, opContext));
                    }
                };

                CloudBlobDirectory sourceContainer = await GetDirectoryAsync(sourceAccount, copyItem.SourceContainer, blobRequestOptions);

                CloudBlobDirectory destinationContainer = await GetDirectoryAsync(destinationAccount, copyItem.DestinationContainer, blobRequestOptions);

                BlobContinuationToken continueToken = null;

                do
                {
                    // Fetch blobs in groups of 5000 max. If more than that loop until continue token is not null
                    var listTask = await sourceContainer.ListBlobsSegmentedAsync(true, BlobListingDetails.None, null, continueToken, blobRequestOptions, opContext, cancelToken);

                    // Save the continuation token
                    continueToken = listTask.ContinuationToken;

                    // Asynchronous parallel iteration through blobs to copy
                    await listTask.Results.ForEachAsync(parallelTasks, async task =>
                    {
                        // Check for cancellation BEFORE starting another copy so a requested
                        // shutdown does not pay for one more full blob transfer
                        if (cancelToken.IsCancellationRequested)
                        {
                            await log.WriteLineAsync("Web Job Cancellation Requested");
                            cancellationTokenSource.Cancel();
                            return;
                        }

                        CloudBlob sourceBlob      = (CloudBlob)task;
                        CloudBlob destinationBlob = GetBlobReference(destinationContainer, sourceBlob);

                        // Copy the blob
                        await CopyBlobAsync(sourceBlob, destinationBlob, transferContext, transferCheckpoint, cancellationTokenSource);
                    });
                } while (continueToken != null);

                await log.WriteLineAsync(progressRecorder.ToString());

                await log.WriteLineAsync("Job Complete: " + copyItem.JobName);
            }
            catch (Exception ex)
            {
                await log.WriteLineAsync("Backup Job error: " + copyItem.JobName + ", Error: " + ex.Message);

                await log.WriteLineAsync(progressRecorder.ToString());

                // Rethrow so the WebJobs runtime marks the message as failed and retries it.
                // Swallowing here would dequeue the message as if the job had succeeded.
                throw;
            }
        }
Example #4
0
        // This version uses CopyDirectoryAsync in DML 0.2. I'm not sure it is faster than what I did above copying them manually in DML 0.1
        // This version uses CopyDirectoryAsync in DML 0.2 to copy the whole container in one call.
        // Log writes are awaited (WriteLineAsync) for consistency with ProcessQueueMessage, and on
        // failure the exception is logged and RETHROWN so the WebJobs runtime keeps the queue
        // message alive and performs its retry attempts (see JobError's "5 attempts" note).
        public async static Task ProcessQueueMessage2([QueueTrigger("backupqueue")] CopyItem copyItem, TextWriter log, CancellationToken cancelToken)
        {
            _log = log;
            await log.WriteLineAsync("Job Start: " + copyItem.JobName);

            // This class accumulates transfer data during the process
            ProgressRecorder progressRecorder = new ProgressRecorder();

            try
            {
                // OpContext to track PreCopy Retries on Azure Storage
                // DML has its own context object and retry
                OperationContext opContext = new OperationContext();
                opContext.Retrying += StorageRequest_Retrying;

                // Define Blob Request Options
                BlobRequestOptions blobRequestOptions = new BlobRequestOptions
                {
                    // Defined Exponential Retry Policy above
                    RetryPolicy = _retryPolicy
                };

                // Set the number of parallel tasks in DML. This allows it to copy multiple
                // items at once when copying a container or directory
                int parallelTasks = Convert.ToInt32(ConfigurationManager.AppSettings["ParallelTasks"]);

                // Set the number of connections so each DML copy task has its own connection to Azure Storage
                ServicePointManager.DefaultConnectionLimit = Environment.ProcessorCount * 8;

                TransferManager.Configurations.ParallelOperations = parallelTasks;

                await log.WriteLineAsync("Parallel Operations = " + parallelTasks.ToString());

                // Short circuit additional request round trips. We are not chunking and
                // uploading large amounts of data where we'd send 100's so set to false
                ServicePointManager.Expect100Continue = false;

                // CancellationTokenSource used to cancel the transfer
                CancellationTokenSource cancellationTokenSource = new CancellationTokenSource();

                // Represents a checkpoint from which a transfer may be resumed and continued
                // This is set within the CopyContainerAsync function
                TransferCheckpoint transferCheckpoint = null;

                // Open connections to both storage accounts
                CloudStorageAccount sourceAccount      = GetAccount(copyItem.SourceAccountToken);
                CloudStorageAccount destinationAccount = GetAccount(copyItem.DestinationAccountToken);

                // Context object for the transfer, provides additional runtime information about its execution
                TransferContext transferContext = new TransferContext
                {
                    // Pipe transfer progress data to ProgressRecorder
                    ProgressHandler = progressRecorder,

                    // Callback to overwrite destination if it exists
                    OverwriteCallback = (source, destination) =>
                    {
                        return(OverwriteFile(source, destination, sourceAccount, destinationAccount, copyItem, blobRequestOptions, opContext));
                    }
                };

                CopyDirectoryOptions copyDirectoryOptions = new CopyDirectoryOptions
                {
                    IncludeSnapshots = true,
                    Recursive        = true
                };

                // Get the root source and destination directories for the two containers to be copied
                CloudBlobDirectory sourceDirectory = await GetDirectoryAsync(sourceAccount, copyItem.SourceContainer, blobRequestOptions);

                CloudBlobDirectory destinationDirectory = await GetDirectoryAsync(destinationAccount, copyItem.DestinationContainer, blobRequestOptions);

                // Copy the container
                await CopyDirectoryAsync(sourceDirectory, destinationDirectory, copyDirectoryOptions, transferContext, transferCheckpoint, cancellationTokenSource);

                await log.WriteLineAsync(progressRecorder.ToString());
                await log.WriteLineAsync("Job Complete: " + copyItem.JobName);
            }
            catch (Exception ex)
            {
                await log.WriteLineAsync("Backup Job error: " + copyItem.JobName + ", Error: " + ex.Message);
                await log.WriteLineAsync(progressRecorder.ToString());

                // Rethrow so the WebJobs runtime marks the message as failed and retries it.
                // Swallowing here would dequeue the message as if the job had succeeded.
                throw;
            }
        }