Example #1
0
        /// <summary>
        /// Logs a job failure: the error message, accumulated transfer progress,
        /// and per-file detail for every failed and skipped transfer.
        /// </summary>
        /// <param name="jobName">Display name of the job that failed.</param>
        /// <param name="message">Error message describing the failure.</param>
        /// <param name="progressRecorder">Accumulated transfer statistics for the job.</param>
        /// <param name="failedFiles">Files that failed during the transfer.</param>
        /// <param name="skippedFiles">Files that were skipped during the transfer.</param>
        public static void JobError(string jobName, string message, ProgressRecorder progressRecorder, List<TransferDetail> failedFiles, List<TransferDetail> skippedFiles)
        {
            log.WriteLine("Error for Job: " + jobName);
            log.WriteLine("Error: " + message);
            log.WriteLine("WebJobs will make 5 attempts to rerun and complete");
            log.WriteLine(progressRecorder.ToString());

            // The failed and skipped sections differ only in their header text,
            // so a shared helper writes each one.
            WriteTransferDetails("Detailed File Transfer Errors", failedFiles);
            WriteTransferDetails("Skipped File Details", skippedFiles);
        }

        // Writes a section header followed by source/destination/error lines for
        // each transfer detail. Writes nothing when the list is empty.
        private static void WriteTransferDetails(string header, List<TransferDetail> details)
        {
            if (details.Count == 0)
            {
                return;
            }

            log.WriteLine(header);
            foreach (TransferDetail td in details)
            {
                log.WriteLine("Source File: " + td.Source);
                log.WriteLine("Destination File: " + td.Destination);
                log.WriteLine("Error Message: " + td.Error);
            }
        }
Example #2
0
        /// <summary>
        /// Logs successful completion of a job along with its accumulated transfer
        /// statistics and detail lines for any files skipped during the copy.
        /// </summary>
        /// <param name="jobName">Display name of the completed job.</param>
        /// <param name="progressRecorder">Accumulated transfer statistics for the job.</param>
        /// <param name="skippedFiles">Files that were skipped during the transfer.</param>
        public static void JobComplete(string jobName, ProgressRecorder progressRecorder, List <TransferDetail> skippedFiles)
        {
            log.WriteLine("Job Complete: " + jobName);
            log.WriteLine(progressRecorder.ToString());

            // Nothing further to report unless files were skipped.
            if (skippedFiles.Count == 0)
            {
                return;
            }

            log.WriteLine("Skipped File Details");
            foreach (TransferDetail skipped in skippedFiles)
            {
                log.WriteLine("Source File: " + skipped.Source);
                log.WriteLine("Destination File: " + skipped.Destination);
                log.WriteLine("Error Message: " + skipped.Error);
            }
        }
Example #3
0
        /// <summary>
        /// WebJobs entry point, triggered when a message is written to the "backupqueue" queue.
        /// Copies a blob directory between two storage accounts with the Data Movement Library,
        /// resuming from a serialized checkpoint when the job is a rerun of a prior failure.
        /// If any file fails to transfer, a checkpoint is saved and an exception is thrown so
        /// the WebJobs runtime will retry the job.
        /// </summary>
        /// <param name="copyItem">Queue message describing the source and destination of the copy.</param>
        /// <param name="log">Log writer supplied by the WebJobs runtime.</param>
        /// <param name="cancelToken">Cancellation token supplied by the WebJobs runtime.</param>
        public async static Task ProcessMessage([QueueTrigger("backupqueue")] CopyItem copyItem, TextWriter log, CancellationToken cancelToken)
        {
            // Copy TextWriter into Log Helper class
            Logger.log = log;

            // Log Job Start
            await Logger.JobStartAsync(copyItem.JobName);

            // This class accumulates transfer data during the copy
            ProgressRecorder progressRecorder = new ProgressRecorder();

            try
            {
                // OpContext to track PreCopy Retries on Azure Storage
                // DML has its own context object and retry
                _opContext           = new OperationContext();
                _opContext.Retrying += StorageRequest_Retrying;

                // Define Blob Request Options
                _blobRequestOptions = new BlobRequestOptions
                {
                    // Defined Exponential Retry Policy above
                    RetryPolicy = _retryPolicy
                };

                // Set the number of parallel tasks in DML.
                // This allows it to copy multiple items at once when copying a container or directory
                // The best (and default value) is Environment.ProcessorCount * 8
                int parallelTasks = Environment.ProcessorCount * 8;
                TransferManager.Configurations.ParallelOperations = parallelTasks;

                // Set the number of connections.
                // This should match ParallelOperations so each DML copy task has its own connection to Azure Storage
                ServicePointManager.DefaultConnectionLimit = parallelTasks;

                // Short circuit additional request round trips. We are not chunking and
                // uploading large amounts of data where we'd send 100's so set to false
                ServicePointManager.Expect100Continue = false;

                // User Agent for tracing
                TransferManager.Configurations.UserAgentPrefix = "AzureDmlBackup";

                // CancellationTokenSource used to cancel the transfer
                CancellationTokenSource cancellationTokenSource = new CancellationTokenSource();

                // Open connections to both storage accounts
                CloudStorageAccount sourceAccount      = GetAccount(copyItem.SourceAccountToken);
                CloudStorageAccount destinationAccount = GetAccount(copyItem.DestinationAccountToken);

                // Represents a checkpoint from which a transfer may be resumed and continued.
                // This is initalized as null first time then hydrated within CopyDirectoryAsync().
                // However if this job is being resumed from a previous failure this function will hydrate
                // from a serialized checkpoint saved to blob storage.
                TransferCheckpoint transferCheckpoint = await GetTransferCheckpoint(copyItem.JobId);


                // Context object for the transfer, provides additional runtime information about its execution
                // If this is a resumed copy operation then pass the checkpoint to the TransferContext so it can resume the copy
                TransferContext transferContext = new TransferContext(transferCheckpoint)
                {
                    // Pipe transfer progress data to ProgressRecorder
                    // ProgressRecorder is used to log the results of the copy operation
                    ProgressHandler = progressRecorder,

                    // If the destination already exists this delegate is called.
                    // Return true to overwrite or false to skip the file during the transfer
                    OverwriteCallback = (source, destination) =>
                    {
                        return(OverwriteFile(source, destination, sourceAccount, destinationAccount, copyItem.IsIncremental));
                    }
                };

                // This event is used to log files skipped during the transfer
                transferContext.FileSkipped += TransferContext_FileSkipped;

                // This event is used to catch exceptions for files that fail during a transfer
                transferContext.FileFailed += TransferContext_FileFailed;

                // Set Options for copying the container such as search patterns, recursive, etc.
                CopyDirectoryOptions copyDirectoryOptions = new CopyDirectoryOptions
                {
                    IncludeSnapshots = true,
                    Recursive        = true
                };

                // Get the root source and destination directories for the two containers to be copied
                CloudBlobDirectory sourceDirectory = await GetDirectoryAsync(sourceAccount, copyItem.SourceContainer, copyItem.SourceDirectory);

                CloudBlobDirectory destinationDirectory = await GetDirectoryAsync(destinationAccount, copyItem.DestinationContainer, copyItem.DestinationDirectory);


                // Copy the container
                await CopyDirectoryAsync(copyItem.JobId, sourceDirectory, destinationDirectory, copyDirectoryOptions, transferContext, transferCheckpoint, cancellationTokenSource);


                // Check if any files failed during transfer
                if (_failedFiles.Count > 0)
                {
                    // Save a Checkpoint so we can restart the transfer
                    transferCheckpoint = transferContext.LastCheckpoint;
                    SaveTransferCheckpoint(copyItem.JobId, transferCheckpoint);

                    // Throw so the WebJobs runtime fails this invocation and reruns it.
                    // InvalidOperationException (rather than the base Exception type, CA2201)
                    // keeps the failure specific while remaining catchable below.
                    throw new InvalidOperationException("One or more errors occurred during the transfer.");
                }

                // Log job completion
                await Logger.JobCompleteAsync(copyItem.JobName, progressRecorder, _skippedFiles);
            }
            catch (Exception ex)
            {
                // Log Job Error
                await Logger.JobErrorAsync(copyItem.JobName, ex.Message, progressRecorder, _failedFiles, _skippedFiles);

                // Rethrow to fail the web job. "throw;" (not "throw ex;") preserves
                // the original stack trace of the failure.
                throw;
            }
        }
Example #4
0
        // This function will get triggered/executed when a new message is written on the Azure WebJobs Queue called backupqueue
        // This version uses CopyDirectoryAsync in DML 0.1. Blobs are copied in parallel using ForEachAsync()
        //
        // It lists the source container segment by segment (5000 blobs max per
        // segment, looping on the continuation token) and copies each blob to the
        // destination account, accumulating statistics in a ProgressRecorder.
        //
        // NOTE(review): the catch block below logs the error but does not rethrow,
        // so a failed run will NOT be failed/retried by the WebJobs runtime —
        // confirm this is intentional (the ProcessMessage version rethrows).
        public async static Task ProcessQueueMessage([QueueTrigger("backupqueue")] CopyItem copyItem, TextWriter log, CancellationToken cancelToken)
        {
            // Stash the WebJobs log writer in a static field so event handlers can use it
            _log = log;
            await log.WriteLineAsync("Job Start: " + copyItem.JobName);

            // This class accumulates transfer data during the process
            ProgressRecorder progressRecorder = new ProgressRecorder();

            try
            {
                // OpContext for pre-copy retries on Azure Storage
                // DML has its own context object and retry
                OperationContext opContext = new OperationContext();
                opContext.Retrying += StorageRequest_Retrying;

                // Define Blob Request Options
                BlobRequestOptions blobRequestOptions = new BlobRequestOptions
                {
                    // Defined Exponential Retry Policy
                    RetryPolicy = _retryPolicy
                };

                // The default number of parallel tasks in DML = # of Processors * 8
                // Set that as our max limit of parallel tasks to that amount since more gives us no additional performance
                //int parallelTasks = Environment.ProcessorCount * 8;
                // NOTE(review): throws FormatException if the "ParallelTasks" app
                // setting is missing or non-numeric — confirm it is always configured.
                int parallelTasks = Convert.ToInt32(ConfigurationManager.AppSettings["ParallelTasks"]);

                // Set the number of http connections to # of Processors * 8
                ServicePointManager.DefaultConnectionLimit = Environment.ProcessorCount * 8;

                // Save additional request round trip. We are not chunking and
                // uploading large amounts of data where we'd send 100's so set to false
                ServicePointManager.Expect100Continue = false;

                // CancellationTokenSource used to cancel the transfer
                CancellationTokenSource cancellationTokenSource = new CancellationTokenSource();

                // Represents a checkpoint from which a transfer may be resumed and continue
                // The checkpoint gets set on each call to CopyBlobAsync(). This allows the WebJob
                // to fail then pick it right up and continue to copy blobs, completing the copy job
                TransferCheckpoint transferCheckpoint = null;

                // Open connections to both storage accounts
                CloudStorageAccount sourceAccount      = GetAccount(copyItem.SourceAccountToken);
                CloudStorageAccount destinationAccount = GetAccount(copyItem.DestinationAccountToken);

                // Context object for the transfer, provides additional runtime information about its execution
                TransferContext transferContext = new TransferContext
                {
                    // Pipe transfer progress data to ProgressRecorder
                    ProgressHandler = progressRecorder,

                    // Callback to overwrite destination if it exists
                    OverwriteCallback = (source, destination) =>
                    {
                        return(OverwriteFile(source, destination, sourceAccount, destinationAccount, copyItem, blobRequestOptions, opContext));
                    }
                };

                // Root directories for the source and destination containers
                CloudBlobDirectory sourceContainer = await GetDirectoryAsync(sourceAccount, copyItem.SourceContainer, blobRequestOptions);

                CloudBlobDirectory destinationContainer = await GetDirectoryAsync(destinationAccount, copyItem.DestinationContainer, blobRequestOptions);

                // Null token fetches the first segment; non-null resumes where the last segment ended
                BlobContinuationToken continueToken = null;

                do
                {
                    // Fetch blobs in groups of 5000 max. If more than that loop until continue token is not null
                    var listTask = await sourceContainer.ListBlobsSegmentedAsync(true, BlobListingDetails.None, null, continueToken, blobRequestOptions, opContext, cancelToken);

                    // Save the continuation token
                    continueToken = listTask.ContinuationToken;

                    // Asynchronous parallel iteration through blobs to copy
                    await listTask.Results.ForEachAsync(parallelTasks, async task =>
                    {
                        CloudBlob sourceBlob      = (CloudBlob)task;
                        CloudBlob destinationBlob = GetBlobReference(destinationContainer, sourceBlob);

                        // Copy the blob
                        await CopyBlobAsync(sourceBlob, destinationBlob, transferContext, transferCheckpoint, cancellationTokenSource);

                        // Check for cancellation after each copy: if the WebJobs host
                        // requested cancellation, propagate it to the DML transfer
                        if (cancelToken.IsCancellationRequested)
                        {
                            await log.WriteLineAsync("Web Job Cancellation Requested");
                            cancellationTokenSource.Cancel();
                        }
                    });
                }while (continueToken != null);

                // Log accumulated transfer statistics and completion
                await log.WriteLineAsync(progressRecorder.ToString());

                await log.WriteLineAsync("Job Complete: " + copyItem.JobName);
            }
            catch (Exception ex)
            {
                // Log the failure along with whatever progress was made.
                // See NOTE(review) above: the exception is intentionally not rethrown here.
                await log.WriteLineAsync("Backup Job error: " + copyItem.JobName + ", Error: " + ex.Message);

                await log.WriteLineAsync(progressRecorder.ToString());
            }
        }
Example #5
0
        // This version uses CopyDirectoryAsync in DML 0.2. I'm not sure it is faster than what I did above copying them manually in DML 0.1
        /// <summary>
        /// WebJobs entry point, triggered by a message on the "backupqueue" queue.
        /// Copies an entire container/directory in one call to DML's CopyDirectoryAsync.
        /// NOTE(review): the catch block logs the error but does not rethrow, so a
        /// failed run will NOT be retried by the WebJobs runtime — confirm intentional.
        /// </summary>
        /// <param name="copyItem">Queue message describing the source and destination of the copy.</param>
        /// <param name="log">Log writer supplied by the WebJobs runtime.</param>
        /// <param name="cancelToken">Cancellation token supplied by the WebJobs runtime.</param>
        public async static Task ProcessQueueMessage2([QueueTrigger("backupqueue")] CopyItem copyItem, TextWriter log, CancellationToken cancelToken)
        {
            _log = log;
            // Use the async write APIs throughout, consistent with ProcessQueueMessage,
            // rather than blocking the async method on synchronous WriteLine calls.
            await log.WriteLineAsync("Job Start: " + copyItem.JobName);

            // This class accumulates transfer data during the process
            ProgressRecorder progressRecorder = new ProgressRecorder();

            try
            {
                // OpContext to track PreCopy Retries on Azure Storage
                // DML has its own context object and retry
                OperationContext opContext = new OperationContext();
                opContext.Retrying += StorageRequest_Retrying;

                // Define Blob Request Options
                BlobRequestOptions blobRequestOptions = new BlobRequestOptions
                {
                    // Defined Exponential Retry Policy above
                    RetryPolicy = _retryPolicy
                };

                // Set the number of parallel tasks in DML. This allows it to copy multiple
                // items at once when copying a container or directory
                //int parallelTasks = Environment.ProcessorCount * 8;
                int parallelTasks = Convert.ToInt32(ConfigurationManager.AppSettings["ParallelTasks"]);

                // Set the number of connections so each DML copy task has its own connection to Azure Storage
                ServicePointManager.DefaultConnectionLimit = Environment.ProcessorCount * 8;

                TransferManager.Configurations.ParallelOperations = parallelTasks; //64;

                await log.WriteLineAsync("Parallel Operations = " + parallelTasks.ToString());

                // Short circuit additional request round trips. We are not chunking and
                // uploading large amounts of data where we'd send 100's so set to false
                ServicePointManager.Expect100Continue = false;

                // CancellationTokenSource used to cancel the transfer
                CancellationTokenSource cancellationTokenSource = new CancellationTokenSource();

                // Represents a checkpoint from which a transfer may be resumed and continued
                // This is set within the CopyContainerAsync function
                TransferCheckpoint transferCheckpoint = null;

                // Open connections to both storage accounts
                CloudStorageAccount sourceAccount      = GetAccount(copyItem.SourceAccountToken);
                CloudStorageAccount destinationAccount = GetAccount(copyItem.DestinationAccountToken);

                // Context object for the transfer, provides additional runtime information about its execution
                TransferContext transferContext = new TransferContext
                {
                    // Pipe transfer progress data to ProgressRecorder
                    ProgressHandler = progressRecorder,

                    // Callback to overwrite destination if it exists
                    OverwriteCallback = (source, destination) =>
                    {
                        return(OverwriteFile(source, destination, sourceAccount, destinationAccount, copyItem, blobRequestOptions, opContext));
                    }
                };

                // Copy options: include snapshots and recurse into subdirectories
                CopyDirectoryOptions copyDirectoryOptions = new CopyDirectoryOptions
                {
                    IncludeSnapshots = true,
                    Recursive        = true
                };

                // Get the root source and destination directories for the two containers to be copied
                CloudBlobDirectory sourceDirectory = await GetDirectoryAsync(sourceAccount, copyItem.SourceContainer, blobRequestOptions);

                CloudBlobDirectory destinationDirectory = await GetDirectoryAsync(destinationAccount, copyItem.DestinationContainer, blobRequestOptions);

                // Copy the container
                await CopyDirectoryAsync(sourceDirectory, destinationDirectory, copyDirectoryOptions, transferContext, transferCheckpoint, cancellationTokenSource);


                await log.WriteLineAsync(progressRecorder.ToString());
                await log.WriteLineAsync("Job Complete: " + copyItem.JobName);
            }
            catch (Exception ex)
            {
                // Log the failure along with whatever progress was made (no rethrow — see note above)
                await log.WriteLineAsync("Backup Job error: " + copyItem.JobName + ", Error: " + ex.Message);
                await log.WriteLineAsync(progressRecorder.ToString());
            }
        }