public static Task Run(BackupDatabase database, Options options, ITaskReader taskreader)
{
    return AutomationExtensions.RunTask(
        new { UploadChannel = Channels.BackendRequest.ForWrite },
        async self =>
        {
            if (options.IndexfilePolicy != Options.IndexFileStrategy.None)
            {
                foreach (var blockfile in await database.GetMissingIndexFilesAsync())
                {
                    if (!await taskreader.ProgressAsync)
                        return;

                    Logging.Log.WriteInformationMessage(LOGTAG, "RecreateMissingIndexFile", "Re-creating missing index file for {0}", blockfile);
                    var w = await Common.IndexVolumeCreator.CreateIndexVolume(blockfile, options, database);

                    if (!await taskreader.ProgressAsync)
                        return;

                    await database.UpdateRemoteVolumeAsync(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null);
                    await self.UploadChannel.WriteAsync(new IndexVolumeUploadRequest(w));
                }
            }
        });
}
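// Illustrative sketch (not part of the original code): the repeated
// 'if (!await taskreader.ProgressAsync) return;' guards above implement cooperative
// pause/stop between expensive steps. Assuming only that the guard is an async
// predicate, the pattern in isolation could look like the helper below; the names
// are invented for illustration and the snippet assumes 'using System;' and
// 'using System.Threading.Tasks;' are in scope.
internal static class ProgressGuardSketch
{
    // Runs each step in order, re-checking the guard before every step, and
    // returns quietly as soon as the guard reports that work should not continue.
    public static async Task RunStepsAsync(Func<Task<bool>> progressAsync, params Func<Task>[] steps)
    {
        foreach (var step in steps)
        {
            if (!await progressAsync())
                return;

            await step();
        }
    }
}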
public static Task Run(BackupDatabase database, Options options, ITaskReader taskreader)
{
    return AutomationExtensions.RunTask(
        new
        {
            Input = Channels.OutputBlocks.ForRead,
            Output = Channels.BackendRequest.ForWrite,
            SpillPickup = Channels.SpillPickup.ForWrite,
        },
        async self =>
        {
            var noIndexFiles = options.IndexfilePolicy == Options.IndexFileStrategy.None;
            var fullIndexFiles = options.IndexfilePolicy == Options.IndexFileStrategy.Full;

            BlockVolumeWriter blockvolume = null;
            TemporaryIndexVolume indexvolume = null;

            try
            {
                while (true)
                {
                    var b = await self.Input.ReadAsync();

                    // Lazy-start a new block volume
                    if (blockvolume == null)
                    {
                        // Before we start a new volume, probe to see if it exists.
                        // This will delay creation of volumes for differential backups.
                        // There can be a race, such that two workers determine that
                        // the block is missing, but this will be solved by the AddBlock call,
                        // which runs atomically.
                        if (await database.FindBlockIDAsync(b.HashKey, b.Size) >= 0)
                        {
                            b.TaskCompletion.TrySetResult(false);
                            continue;
                        }

                        blockvolume = new BlockVolumeWriter(options);
                        blockvolume.VolumeID = await database.RegisterRemoteVolumeAsync(blockvolume.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary);

                        indexvolume = noIndexFiles ? null : new TemporaryIndexVolume(options);
                    }

                    var newBlock = await database.AddBlockAsync(b.HashKey, b.Size, blockvolume.VolumeID);
                    b.TaskCompletion.TrySetResult(newBlock);

                    if (newBlock)
                    {
                        blockvolume.AddBlock(b.HashKey, b.Data, b.Offset, (int)b.Size, b.Hint);

                        if (indexvolume != null)
                        {
                            indexvolume.AddBlock(b.HashKey, b.Size);
                            if (b.IsBlocklistHashes && fullIndexFiles)
                                indexvolume.AddBlockListHash(b.HashKey, b.Size, b.Data);
                        }

                        // If the volume is full, send it to upload
                        if (blockvolume.Filesize > options.VolumeSize - options.Blocksize)
                        {
                            // When uploading a new volume, we register the volumes and then flush the transaction.
                            // This ensures that the local database and remote storage are as closely related as possible.
                            await database.UpdateRemoteVolumeAsync(blockvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null);

                            blockvolume.Close();

                            await database.CommitTransactionAsync("CommitAddBlockToOutputFlush");

                            FileEntryItem blockEntry = blockvolume.CreateFileEntryForUpload(options);

                            TemporaryIndexVolume indexVolumeCopy = null;
                            if (indexvolume != null)
                            {
                                indexVolumeCopy = new TemporaryIndexVolume(options);
                                indexvolume.CopyTo(indexVolumeCopy, false);
                            }

                            var uploadRequest = new VolumeUploadRequest(blockvolume, blockEntry, indexVolumeCopy, options, database);

                            blockvolume = null;
                            indexvolume = null;

                            // Write to output at the end here to prevent sending a full volume to the SpillCollector
                            await self.Output.WriteAsync(uploadRequest);
                        }
                    }

                    // We ignore the stop signal, but not the pause and terminate
                    await taskreader.ProgressAsync;
                }
            }
            catch (Exception ex)
            {
                if (ex.IsRetiredException())
                {
                    // If we have collected data, merge all pending volumes into a single volume
                    if (blockvolume != null && blockvolume.SourceSize > 0)
                        await self.SpillPickup.WriteAsync(new SpillVolumeRequest(blockvolume, indexvolume));
                }

                throw;
            }
        });
}
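// Illustrative sketch (not part of the original code): the comments above rely on
// FindBlockIDAsync being only an optimistic pre-check, with the later AddBlockAsync
// acting as the atomic, authoritative registration. The same shape, using an
// in-memory ConcurrentDictionary as a stand-in for the database, looks like this;
// all names below are invented for illustration.
internal static class BlockDedupRaceSketch
{
    private static readonly System.Collections.Concurrent.ConcurrentDictionary<string, long> KnownBlocks
        = new System.Collections.Concurrent.ConcurrentDictionary<string, long>();

    // Two workers may both observe 'false' here for the same hash (the race),
    // but that only costs a wasted attempt below; correctness is unaffected.
    public static bool ProbablyKnown(string hashKey)
        => KnownBlocks.ContainsKey(hashKey);

    // TryAdd is atomic: exactly one caller wins and should write the block data,
    // mirroring how only the winning AddBlockAsync call reports a new block.
    public static bool TryClaimBlock(string hashKey, long volumeId)
        => KnownBlocks.TryAdd(hashKey, volumeId);
}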
public static Task Run(BackupDatabase database, Options options, BackupResults result, ITaskReader taskreader, string lasttempfilelist, long lasttempfileid)
{
    return AutomationExtensions.RunTask(
        new { UploadChannel = Channels.BackendRequest.ForWrite },
        async self =>
        {
            // Check if we should upload a synthetic filelist
            if (options.DisableSyntheticFilelist || string.IsNullOrWhiteSpace(lasttempfilelist) || lasttempfileid < 0)
                return;

            // Check that we still need to process this after the cleanup has performed its duties
            var syntbase = await database.GetRemoteVolumeFromIDAsync(lasttempfileid);

            // If we do not have a valid entry, warn and quit
            if (syntbase.Name == null || syntbase.State != RemoteVolumeState.Uploaded)
            {
                // TODO: If the repair succeeds, this could give a false warning?
                Logging.Log.WriteWarningMessage(LOGTAG, "MissingTemporaryFilelist", null, "Expected there to be a temporary fileset for synthetic filelist ({0}, {1}), but none was found?", lasttempfileid, lasttempfilelist);
                return;
            }

            // The file is missing or repaired
            if (syntbase.Name == null || (syntbase.State != RemoteVolumeState.Uploading && syntbase.State != RemoteVolumeState.Temporary))
            {
                Logging.Log.WriteInformationMessage(LOGTAG, "SkippingSyntheticListUpload", "Skipping synthetic upload because the temporary fileset appears to be complete: ({0}, {1}, {2})", lasttempfileid, lasttempfilelist, syntbase.State);
                return;
            }

            // Ready to build and upload the synthetic list
            await database.CommitTransactionAsync("PreSyntheticFilelist");
            var incompleteFilesets = (await database.GetIncompleteFilesetsAsync()).OrderBy(x => x.Value).ToList();

            result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreviousBackupFinalize);
            Logging.Log.WriteInformationMessage(LOGTAG, "PreviousBackupFilelistUpload", "Uploading filelist from previous interrupted backup");

            if (!await taskreader.ProgressAsync)
                return;

            var incompleteSet = incompleteFilesets.Last();
            var badIds = from n in incompleteFilesets select n.Key;

            var prevs = (from n in await database.GetFilesetTimesAsync()
                         where n.Key < incompleteSet.Key && !badIds.Contains(n.Key)
                         orderby n.Key
                         select n.Key).ToArray();

            var prevId = prevs.Length == 0 ? -1 : prevs.Last();

            FilesetVolumeWriter fsw = null;
            try
            {
                var s = 1;
                var fileTime = incompleteSet.Value + TimeSpan.FromSeconds(s);
                var oldFilesetID = incompleteSet.Key;

                // Probe for an unused filename
                while (s < 60)
                {
                    var id = await database.GetRemoteVolumeIDAsync(VolumeBase.GenerateFilename(RemoteVolumeType.Files, options, null, fileTime));
                    if (id < 0)
                        break;

                    fileTime = incompleteSet.Value + TimeSpan.FromSeconds(++s);
                }

                fsw = new FilesetVolumeWriter(options, fileTime);
                fsw.VolumeID = await database.RegisterRemoteVolumeAsync(fsw.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary);

                if (!string.IsNullOrEmpty(options.ControlFiles))
                    foreach (var p in options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries))
                        fsw.AddControlFile(p, options.GetCompressionHintFromFilename(p));

                var newFilesetID = await database.CreateFilesetAsync(fsw.VolumeID, fileTime);
                await database.LinkFilesetToVolumeAsync(newFilesetID, fsw.VolumeID);
                await database.AppendFilesFromPreviousSetAsync(null, newFilesetID, prevId, fileTime);

                await database.WriteFilesetAsync(fsw, newFilesetID);

                if (!await taskreader.ProgressAsync)
                    return;

                await database.UpdateRemoteVolumeAsync(fsw.RemoteFilename, RemoteVolumeState.Uploading, -1, null);
                await database.CommitTransactionAsync("CommitUpdateFilelistVolume");
                await self.UploadChannel.WriteAsync(new FilesetUploadRequest(fsw));
                fsw = null;
            }
            catch
            {
                await database.RollbackTransactionAsync();
                throw;
            }
            finally
            {
                if (fsw != null)
                {
                    try { fsw.Dispose(); }
                    catch { fsw = null; }
                }
            }
        });
}
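// Illustrative sketch (not part of the original code): the "probe for an unused
// filename" loop above advances the fileset timestamp one second at a time, for at
// most 60 attempts, until the generated remote filename is not already registered.
// In isolation, with 'existsAsync' standing in for the GetRemoteVolumeIDAsync lookup,
// the probe could be written like this; the names are invented for illustration and
// the snippet assumes 'using System;' and 'using System.Threading.Tasks;' are in scope.
internal static class FilesetTimeProbeSketch
{
    public static async Task<DateTime> FindUnusedTimeAsync(DateTime baseTime, Func<DateTime, Task<bool>> existsAsync)
    {
        var s = 1;
        var candidate = baseTime.AddSeconds(s);

        // Same bound as the loop above: give up after 60 candidate timestamps
        // and reuse the last one probed.
        while (s < 60 && await existsAsync(candidate))
            candidate = baseTime.AddSeconds(++s);

        return candidate;
    }
}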
public static Task Run(BackupResults result, BackupDatabase db, Options options, FilesetVolumeWriter filesetvolume, long filesetid, Common.ITaskReader taskreader)
{
    return AutomationExtensions.RunTask(
        new { Output = Channels.BackendRequest.ForWrite, },
        async self =>
        {
            if (!await taskreader.ProgressAsync)
                return;

            // Update the reported source and backend changes
            using (new Logging.Timer(LOGTAG, "UpdateChangeStatistics", "UpdateChangeStatistics"))
                await db.UpdateChangeStatisticsAsync(result);

            var changeCount =
                result.AddedFiles + result.ModifiedFiles + result.DeletedFiles +
                result.AddedFolders + result.ModifiedFolders + result.DeletedFolders +
                result.AddedSymlinks + result.ModifiedSymlinks + result.DeletedSymlinks;

            // Changes in the filelist trigger a filelist upload
            if (options.UploadUnchangedBackups || changeCount > 0)
            {
                using (new Logging.Timer(LOGTAG, "UploadNewFileset", "Uploading a new fileset"))
                {
                    if (!string.IsNullOrEmpty(options.ControlFiles))
                        foreach (var p in options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries))
                            filesetvolume.AddControlFile(p, options.GetCompressionHintFromFilename(p));

                    if (!await taskreader.ProgressAsync)
                        return;

                    await db.WriteFilesetAsync(filesetvolume, filesetid);
                    filesetvolume.Close();

                    if (!await taskreader.ProgressAsync)
                        return;

                    await db.UpdateRemoteVolumeAsync(filesetvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null);
                    await db.CommitTransactionAsync("CommitUpdateRemoteVolume");
                    await self.Output.WriteAsync(new FilesetUploadRequest(filesetvolume));
                }
            }
            else
            {
                Logging.Log.WriteVerboseMessage(LOGTAG, "RemovingLeftoverTempFile", "Removing temp files, as no data needs to be uploaded");
                await db.RemoveRemoteVolumeAsync(filesetvolume.RemoteFilename);
            }

            await db.CommitTransactionAsync("CommitUpdateRemoteVolume");
        });
}
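// Illustrative sketch (not part of the original code): the branch above reduces to a
// single predicate -- upload the fileset when anything was added, modified or deleted,
// or when unchanged backups are explicitly requested; otherwise discard the temporary
// volume. The helper name below is invented for illustration.
internal static class FilesetUploadDecisionSketch
{
    public static bool ShouldUploadFileset(bool uploadUnchangedBackups, long changeCount)
        => uploadUnchangedBackups || changeCount > 0;
}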