/// <summary>
/// Fetches the next batch of work items from the given queue, retrying
/// transient failures via the shared retry policy.
/// </summary>
/// <param name="workQueue">Queue to fetch work items from.</param>
/// <returns>The fetched work items.</returns>
protected async Task<List<WorkItem>> GetWork(IWorkItemMgmt workQueue)
{
    return await retryPolicy
        .ExecuteAsync(async () => await workQueue.Fetch().ConfigureAwait(false))
        .ConfigureAwait(false);
}
/// <summary>
/// Wires up the shared queue plumbing for a copy job.
/// Folder WorkItem mgmt needs late init, as we don't need more queues than folders!
/// </summary>
/// <param name="opts">Worker options (count and id drive queue partitioning).</param>
protected CopyJob(CopierOptions opts)
{
    largeFileCopyQueue = WorkItemMgmtFactory.CreateAzureWorkItemMgmt(CloudObjectNameStrings.LargeFilesQueueName);
    WorkItemSubmissionController = WorkItemMgmtFactory.CreateAzureWorkItemSubmissionController(opts.WorkerCount, opts.WorkerId);
    folderDoneSet = AzureServiceFactory.GetFolderDoneSet();
    originalWorkerId = opts.WorkerId;
}
/// <summary>
/// Probes the given queue for available work, retrying transient failures
/// via the shared retry policy.
/// </summary>
/// <param name="workQueue">Queue to probe.</param>
/// <returns>True when work is available; false when the queue is empty or the probe failed.</returns>
protected async Task<bool> IsThereWork(IWorkItemMgmt workQueue)
{
    try
    {
        // Fix: the inner await used ConfigureAwait(true) while every other await
        // in this class uses false; this is background worker code with no
        // synchronization context worth capturing, so use false consistently.
        return await retryPolicy.ExecuteAsync(
                async () => await workQueue.WorkAvailable().ConfigureAwait(false))
            .ConfigureAwait(false);
    }
    catch (Exception e)
    {
        // Deliberate best-effort: a failed probe is reported as "no work".
        // Log the full exception (type + message + stack trace) rather than
        // only e.Message, so transient queue failures stay diagnosable.
        Log.Debug(e.ToString());
        return false;
    }
}
/// <summary>
/// Returns the number of work items still outstanding in the given source.
/// </summary>
/// <param name="workItemSource">Queue whose backlog is measured.</param>
/// <returns>Count of outstanding work items.</returns>
private async Task<int> GetQueueSize(IWorkItemMgmt workItemSource)
{
    int outstanding = await workItemSource.GetCountOfOutstandingWork().ConfigureAwait(false);
    return outstanding;
}
/// <summary>
/// Processes work items from the Azure storage queues.
/// Based on current logic, we have 3 queues per job:
/// - folder
/// - file
/// - largefile
/// Only folder queues are differentiated based on the job / batch client number
/// </summary>
/// <param name="workQueue">Queue to drain.</param>
/// <param name="isFileQueue">True when the queue carries file-copy items; false for folder items.</param>
/// <returns>Completes once the queue has been empty for MaxQueueRetry consecutive polls.</returns>
private async Task ProcessWorkQueue(IWorkItemMgmt workQueue, bool isFileQueue)
{
    int retryCount = 0;

    // Fix: one RNG for the whole loop. The original created `new Random()` on
    // every empty poll; time-based seeding can produce identical sequences and
    // therefore no actual jitter across rapid retries.
    var jitter = new Random();

    try
    {
        // we loop through several times, in case there are other workers still submitting stuff...
        while (retryCount < MaxQueueRetry)
        {
            bool thereIsWork = await IsThereWork(workQueue).ConfigureAwait(false);

            if (thereIsWork)
            {
                retryCount = 0;
                List<WorkItem> workitems = await GetWork(workQueue).ConfigureAwait(false);

                foreach (var workitem in workitems)
                {
                    // Skip null / empty placeholder items.
                    if (workitem == null || workitem.Empty)
                    {
                        continue;
                    }

                    if (isFileQueue)
                    {
                        azureFilesTargetStorage.CopyFile(workitem.SourcePath, workitem.TargetPath);
                    }
                    else if (await FolderWasNotAlreadyCompleted(workitem).ConfigureAwait(false))
                    {
                        Log.Always(FixedStrings.CreatingDirectory + workitem.TargetPath);

                        if (!azureFilesTargetStorage.CreateFolder(workitem.TargetPath))
                        {
                            Log.Always(ErrorStrings.FailedCopy + workitem.TargetPath);
                        }

                        // Fan out: queue the sub-folders and files of this folder,
                        // then record the folder as done so other workers skip it.
                        await SubmitFolderWorkitems(localFileStorage.EnumerateFolders(workitem.SourcePath), opts).ConfigureAwait(false);
                        await SubmitFileWorkItems(workitem.TargetPath, localFileStorage.EnumerateFiles(workitem.SourcePath)).ConfigureAwait(false);
                        await folderDoneSet.Add(workitem.SourcePath).ConfigureAwait(false);
                    }
                }

                await workQueue.CompleteWork().ConfigureAwait(false);
            }
            else
            {
                retryCount++;

                // Jitter the retry so parallel workers don't poll in lock-step.
                // Fix: Task.Delay instead of Thread.Sleep — don't block a thread
                // inside an async method.
                int sleepTime = jitter.Next(1, 3) * 250;
                await Task.Delay(sleepTime).ConfigureAwait(false);
            }
        }
    }
    catch (Exception cf)
    {
        Log.Always(ErrorStrings.ErrorProcessingWorkException);
        Log.Always(cf.Message);
        Log.Always(cf.StackTrace);

        // Fix: InnerException may be null; the original dereferenced it
        // unconditionally and would throw NullReferenceException inside the
        // catch block, masking the real failure.
        if (cf.InnerException != null)
        {
            Log.Always(cf.InnerException.Message);
            Log.Always(cf.InnerException.StackTrace);
        }

        return;
    }

    Log.Always(FixedStrings.RanOutOfQueueMessages);
}
/// <summary>
/// Processes work items from the Azure storage queues.
/// Based on current logic, we have 3 queues per job:
/// - folder
/// - file
/// - largefile
/// Only folder queues are differentiated based on the job / batch client number
/// </summary>
/// <param name="workQueue">Queue to drain.</param>
/// <param name="isFileQueue">True when the queue carries file-copy items; false for folder items.</param>
/// <returns>Completes once a folder queue has been empty for MaxQueueRetry consecutive polls; file queues keep waiting for work.</returns>
private async Task ProcessWorkQueue(IWorkItemMgmt workQueue, bool isFileQueue)
{
    int retryCount = 0;

    // Fix: one RNG for the whole loop. The original created `new Random()` on
    // every empty poll; time-based seeding can produce identical sequences and
    // therefore no actual jitter across rapid retries.
    var jitter = new Random();

    try
    {
        // we loop through several times, in case there are other workers still submitting stuff...
        while (retryCount < MaxQueueRetry)
        {
            bool thereIsWork = await IsThereWork(workQueue).ConfigureAwait(false);

            if (thereIsWork)
            {
                retryCount = 0;
                List<WorkItem> workitems = await GetWork(workQueue).ConfigureAwait(false);

                foreach (var workitem in workitems)
                {
                    // Skip null / empty placeholder items.
                    if (workitem == null || workitem.Empty)
                    {
                        continue;
                    }

                    if (isFileQueue)
                    {
                        if (azureBlobTargetStorage.CopyFile(workitem.SourcePath, workitem.TargetPath))
                        {
                            workitem.Succeeded = true;
                        }
                    }
                    else
                    {
                        // we do not create folders in blob storage, the folder names serve as file name prefix...
                        if (await FolderWasNotAlreadyCompleted(workitem).ConfigureAwait(false))
                        {
                            Log.Always(FixedStrings.CreatingDirectory + workitem.TargetPath);
                            await SubmitFolderWorkitems(localFileStorage.EnumerateFolders(workitem.SourcePath), opts).ConfigureAwait(false);
                            await SubmitFileWorkItems(workitem.TargetPath, localFileStorage.EnumerateFiles(workitem.SourcePath)).ConfigureAwait(false);
                        }

                        // Folder was done or already done.
                        // We don't want this message hanging around the queue... as they are annoying the sysadmin...
                        await folderDoneSet.Add(workitem.SourcePath).ConfigureAwait(false);
                        workitem.Succeeded = true;
                    }
                }

                await workQueue.CompleteWork().ConfigureAwait(false);
            }
            else
            {
                if (!isFileQueue)
                {
                    // only folder queues should run out of work to do
                    // file queues might need to sleep for work to appear
                    retryCount++;

                    // Folder queues sleep 60 seconds in case failed objects need to reappear...
                    // Fix: Task.Delay instead of Thread.Sleep — don't block a
                    // thread inside an async method.
                    await Task.Delay(60000).ConfigureAwait(false);
                }

                // jittering the retry
                Log.Always("Unable to find work, retrying in a moment...");
                int sleepTime = jitter.Next(1, 3) * 10000;
                await Task.Delay(sleepTime).ConfigureAwait(false);
            }
        }
    }
    catch (Exception cf)
    {
        Log.Always(ErrorStrings.ErrorProcessingWorkException);
        Log.Always(cf.Message);
        Log.Always(cf.StackTrace);

        // Fix: InnerException may be null; the original dereferenced it
        // unconditionally and would throw NullReferenceException inside the
        // catch block, masking the real failure.
        if (cf.InnerException != null)
        {
            Log.Always(cf.InnerException.Message);
            Log.Always(cf.InnerException.StackTrace);
        }

        return;
    }

    Log.Always(FixedStrings.RanOutOfQueueMessages);
}