/// <summary>
/// Fills the tree's root level with one node per drive known to the current
/// <c>StorageAccount</c>, restoring each node's saved state afterwards.
/// Does nothing when no storage account is selected.
/// </summary>
public void PopulateDrives()
{
    if (StorageAccount == null)
        return;

    BackupPlanPathNodeRepository pathNodeDao = new BackupPlanPathNodeRepository();
    try
    {
        IList<Models.BackupPlanPathNode> driveEntries = pathNodeDao.GetAllDrivesByStorageAccount(StorageAccount);
        foreach (Models.BackupPlanPathNode driveEntry in driveEntries)
        {
            BackupPlanTreeNode node = BackupPlanTreeNode.CreateDriveNode(driveEntry);
            this.Nodes.Add(node);
            RestoreNodeState(node);
        }
    }
    catch (System.SystemException e)
    {
        // Surface the failure to the user instead of crashing the UI.
        ShowErrorMessage(e, null);
    }
}
/// <summary>
/// Creates a path-node creator bound to the given repository and transaction.
/// </summary>
/// <param name="dao">Repository used to load/persist <c>BackupPlanPathNode</c>s. Must not be null.</param>
/// <param name="tx">Transaction under which all repository operations will run. Must not be null.</param>
public BackupPlanPathNodeCreator(BackupPlanPathNodeRepository dao, ITransaction tx)
{
    Assert.NotNull(dao);
    Assert.NotNull(tx);

    _dao = dao;
    _tx = tx;
}
//
// Loads or creates a `RestorePlanFile` for every entry in `files` and returns
// the complete list, preserving the input order.
// NOTE: Nothing is persisted here — this method runs on a secondary thread.
//
private LinkedList<Models.RestorePlanFile> DoLoadOrCreateRestorePlanFiles(Models.RestorePlan plan, LinkedList<CustomVersionedFile> files)
{
    Assert.IsNotNull(plan);
    Assert.IsNotNull(files);
    Assert.IsNotNull(AllFilesFromPlan);

    LinkedList<Models.RestorePlanFile> planFiles = new LinkedList<Models.RestorePlanFile>();
    BackupPlanPathNodeRepository pathNodeDao = new BackupPlanPathNodeRepository();

    foreach (CustomVersionedFile versionedFile in files)
    {
        // Abort promptly if the operation was canceled.
        CancellationToken.ThrowIfCancellationRequested();

        // Reuse the plan's existing `RestorePlanFile` for this path, or build a fresh one.
        Models.RestorePlanFile planFile = null;
        bool alreadyKnown = AllFilesFromPlan.TryGetValue(versionedFile.Path, out planFile);
        if (!alreadyKnown)
        {
            planFile = new Models.RestorePlanFile(plan, versionedFile.Path);
            planFile.CreatedAt = DateTime.UtcNow;
        }

        // Every restorable file must already have a matching path node in the database.
        Models.BackupPlanPathNode pathNode = pathNodeDao.GetByStorageAccountAndTypeAndPath(plan.StorageAccount, Models.EntryType.FILE, versionedFile.Path);
        Assert.IsNotNull(pathNode, string.Format("{0} has no corresponding {1}", versionedFile.Path, typeof(Models.BackupPlanPathNode).Name));

        planFile.PathNode = pathNode;
        planFile.VersionedFile = versionedFile;

        planFiles.AddLast(planFile);
    }

    return planFiles;
}
// Persists the results of a backup scan in 5 transactional steps:
// path nodes, `BackupPlanFile`s, `BackupedFile`s, untouched plan files, and the
// `Backup` itself — then (outside the transaction) builds the transfer set.
// NOTE(review): `FilesToInsertOrUpdate` is a deferred LINQ query; it is
// re-enumerated by each of steps 1-4 below.
private void Save(ISession session)
{
    Assert.IsFalse(IsSaved);

    BatchProcessor batchProcessor = new BatchProcessor();
    BackupRepository daoBackup = new BackupRepository(session);
    BackupPlanFileRepository daoBackupPlanFile = new BackupPlanFileRepository(session);
    BackupedFileRepository daoBackupedFile = new BackupedFileRepository(session);
    BackupPlanPathNodeRepository daoBackupPlanPathNode = new BackupPlanPathNodeRepository(session);

#if false
    var FilesToTrack = SuppliedFiles.Union(ChangeSet.DeletedFiles);
    var FilesToInsertOrUpdate =
        from f in FilesToTrack
        where
            // Keep it so we'll later add or update a `BackupedFile`.
            ((f.LastStatus == Models.BackupFileStatus.ADDED || f.LastStatus == Models.BackupFileStatus.MODIFIED))
            // Keep it if `LastStatus` is different from `PreviousLastStatus`.
            || ((f.LastStatus == Models.BackupFileStatus.REMOVED || f.LastStatus == Models.BackupFileStatus.DELETED) && (f.LastStatus != f.PreviousLastStatus))
        // Skip all UNCHANGED files.
        select f;
#else
    var FilesToTrack = SuppliedFiles;
    var FilesToInsertOrUpdate =
        from f in FilesToTrack
        where
            // Keep it so we'll later add or update a `BackupedFile`.
            ((f.LastStatus == Models.BackupFileStatus.ADDED || f.LastStatus == Models.BackupFileStatus.MODIFIED))
        // Skip all UNCHANGED/DELETED/REMOVED files.
        select f;
#endif

    BlockPerfStats stats = new BlockPerfStats();

    using (ITransaction tx = session.BeginTransaction())
    {
        try
        {
            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 1");

            BackupPlanPathNodeCreator pathNodeCreator = new BackupPlanPathNodeCreator(daoBackupPlanPathNode, tx);

            // 1 - Split path into its components and INSERT new path nodes if they don't exist yet.
            foreach (Models.BackupPlanFile entry in FilesToInsertOrUpdate)
            {
                // Throw if the operation was canceled.
                CancellationToken.ThrowIfCancellationRequested();

                try
                {
                    entry.PathNode = pathNodeCreator.CreateOrUpdatePathNodes(Backup.BackupPlan.StorageAccount, entry);
                }
                catch (Exception ex)
                {
                    string message = string.Format("BUG: Failed to create/update {0} => {1}",
                        typeof(Models.BackupPlanPathNode).Name,
                        CustomJsonSerializer.SerializeObject(entry, 1));
                    Results.OnError(this, message);
                    logger.Log(LogLevel.Error, ex, message);
                    throw;
                }

                batchProcessor.ProcessBatch(session);
            }
            batchProcessor.ProcessBatch(session, true);
            stats.End();

            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 2");

            // 2 - Insert/Update `BackupPlanFile`s as necessary.
            foreach (Models.BackupPlanFile entry in FilesToInsertOrUpdate)
            {
                // Throw if the operation was canceled.
                CancellationToken.ThrowIfCancellationRequested();

                // IMPORTANT: It's important that we guarantee the referenced `BackupPlanFile` has a valid `Id`
                // before we reference it elsewhere, otherwise NHibernate won't have a valid value to put on
                // the `backup_plan_file_id` column.
                try
                {
                    daoBackupPlanFile.InsertOrUpdate(tx, entry); // Guarantee it's saved
                }
                catch (Exception ex)
                {
                    string message = string.Format("BUG: Failed to insert/update {0} => {1}",
                        typeof(Models.BackupPlanFile).Name,
                        CustomJsonSerializer.SerializeObject(entry, 1));
                    Results.OnError(this, message);
                    logger.Log(LogLevel.Error, ex, message);
                    logger.Error("Dump of failed object: {0}", entry.DumpMe());
                    throw;
                }

                batchProcessor.ProcessBatch(session);
            }
            batchProcessor.ProcessBatch(session, true);
            stats.End();

            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 3");

            // 3 - Insert/Update `BackupedFile`s as necessary and add them to the `Backup`.
            //List<Models.BackupedFile> backupedFiles = new List<Models.BackupedFile>(FilesToInsertOrUpdate.Count());
            foreach (Models.BackupPlanFile entry in FilesToInsertOrUpdate)
            {
                // Throw if the operation was canceled.
                CancellationToken.ThrowIfCancellationRequested();

                Models.BackupedFile backupedFile = daoBackupedFile.GetByBackupAndPath(Backup, entry.Path);
                if (backupedFile == null) // If we're resuming, this should already exist.
                {
                    // Create `BackupedFile`.
                    backupedFile = new Models.BackupedFile(Backup, entry);
                }
                backupedFile.FileSize = entry.LastSize;
                backupedFile.FileStatus = entry.LastStatus;
                backupedFile.FileLastWrittenAt = entry.LastWrittenAt;
                backupedFile.FileLastChecksum = entry.LastChecksum;
                // REMOVED/DELETED files have no content to upload, so they are marked COMPLETED
                // right away; everything else starts at the enum's default value.
                switch (entry.LastStatus)
                {
                    default:
                        backupedFile.TransferStatus = default(TransferStatus);
                        break;
                    case Models.BackupFileStatus.REMOVED:
                    case Models.BackupFileStatus.DELETED:
                        backupedFile.TransferStatus = TransferStatus.COMPLETED;
                        break;
                }
                backupedFile.UpdatedAt = DateTime.UtcNow;

                try
                {
                    daoBackupedFile.InsertOrUpdate(tx, backupedFile);
                }
                catch (Exception ex)
                {
                    logger.Log(LogLevel.Error, ex, "BUG: Failed to insert/update {0} => {1}",
                        typeof(Models.BackupedFile).Name,
                        CustomJsonSerializer.SerializeObject(backupedFile, 1));
                    throw;
                }

                //backupedFiles.Add(backupedFile);

                batchProcessor.ProcessBatch(session);
            }
            batchProcessor.ProcessBatch(session, true);
            stats.End();

            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 4");

            // 4 - Update all `BackupPlanFile`s that already exist for the backup plan associated with this backup operation.
            {
                var AllFilesFromPlanThatWerentUpdatedYet = AllFilesFromPlan.Values.Except(FilesToInsertOrUpdate);
                foreach (Models.BackupPlanFile file in AllFilesFromPlanThatWerentUpdatedYet)
                {
                    // Throw if the operation was canceled.
                    CancellationToken.ThrowIfCancellationRequested();

                    //Console.WriteLine("2: {0}", file.Path);
                    try
                    {
                        daoBackupPlanFile.Update(tx, file);
                    }
                    catch (Exception ex)
                    {
                        string message = string.Format("BUG: Failed to update {0} => {1} ",
                            typeof(Models.BackupPlanFile).Name,
                            CustomJsonSerializer.SerializeObject(file, 1));
                        Results.OnError(this, message);
                        logger.Log(LogLevel.Error, ex, message);
                        throw;
                    }

                    batchProcessor.ProcessBatch(session);
                }
            }
            batchProcessor.ProcessBatch(session, true);
            stats.End();

            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 5");

            // 5 - Insert/Update `Backup` and its `BackupedFile`s into the database, also saving
            //     the `BackupPlanFile`s instances that may have been changed by step 2.
            {
                //foreach (var bf in backupedFiles)
                //{
                //    // Throw if the operation was canceled.
                //    CancellationToken.ThrowIfCancellationRequested();
                //
                //    Backup.Files.Add(bf);
                //
                //    ProcessBatch(session);
                //}

                try
                {
                    daoBackup.Update(tx, Backup);
                }
                catch (Exception ex)
                {
                    string message = string.Format("BUG: Failed to update {0} => {1}",
                        typeof(Models.Backup).Name,
                        CustomJsonSerializer.SerializeObject(Backup, 1));
                    Results.OnError(this, message);
                    logger.Log(LogLevel.Error, ex, message);
                    throw;
                }
            }
            batchProcessor.ProcessBatch(session, true);
            stats.End();

            // ------------------------------------------------------------------------------------

            tx.Commit();
        }
        catch (OperationCanceledException)
        {
            string message = "Operation cancelled";
            Results.OnError(this, message);
            logger.Warn(message);
            tx.Rollback(); // Rollback the transaction
            throw;
        }
        catch (Exception ex)
        {
            string message = string.Format("Caught Exception: {0}", ex.Message);
            Results.OnError(this, message);
            logger.Log(LogLevel.Error, ex, message);
            tx.Rollback(); // Rollback the transaction
            throw;
        }
        finally
        {
            // ...
        }
    }

    IsSaved = true;

    // 6 - Create versioned files and remove files that won't belong to this backup.
    TransferSet.Files = GetFilesToTransfer(Backup, SuppliedFiles);

    // Test to see if things are okay!
    {
        var transferCount = TransferSet.Files.Count();
        var filesCount = ChangeSet.AddedFiles.Count() + ChangeSet.ModifiedFiles.Count();

        Assert.IsTrue(transferCount == filesCount, "TransferSet.Files must be equal (ChangeSet.AddedFiles + ChangeSet.ModifiedFiles)");
    }
}
// Summary:
// Saves all instances from RemoteObjects list to the database.
// Also removes them from RemoteObjects list to free memory.
// Each remote listing entry is parsed into (type, path, version), matched
// against (or inserted into) the local `BackupPlanFile`/`BackupedFile` tables,
// and its path nodes are created — all inside one batch transaction.
private void Save(CancellationToken CancellationToken)
{
    ISession session = NHibernateHelper.GetSession();

    BatchProcessor batchProcessor = new BatchProcessor(250);
    StorageAccountRepository daoStorageAccount = new StorageAccountRepository(session);
    BackupPlanFileRepository daoBackupPlanFile = new BackupPlanFileRepository(session);
    BackupPlanPathNodeRepository daoBackupPlanPathNode = new BackupPlanPathNodeRepository(session);
    BackupedFileRepository daoBackupedFile = new BackupedFileRepository(session);

    BlockPerfStats stats = new BlockPerfStats();

    using (BatchTransaction tx = batchProcessor.BeginTransaction(session))
    {
        try
        {
            // ------------------------------------------------------------------------------------
            Models.StorageAccount account = daoStorageAccount.Get(Synchronization.StorageAccount.Id);
            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 1");

            BackupPlanPathNodeCreator pathNodeCreator = new BackupPlanPathNodeCreator(daoBackupPlanPathNode, tx);

            // Report save progress
            ReportSaveProgress(SyncAgent.Results.Stats.SavedFileCount, true);

            // Saving loop. Iterates backwards so RemoveAt always pops the tail.
            for (int i = RemoteObjects.Count - 1; i >= 0; i--)
            {
                ListingObject obj = RemoteObjects[i]; // Get instance of object.
                //RemoteObjects[i] = null;
                RemoteObjects.RemoveAt(i); // Remove to free memory. RemoveAt(int) is O(N) in general, but removing the last element is cheap.

                // Throw if the operation was canceled.
                CancellationToken.ThrowIfCancellationRequested();

                Models.EntryType type;
                string path = string.Empty;
                string versionString = string.Empty;
                try
                {
                    // Parse obj.Key into its relevant parts.
                    // NOTE(review): the boolean result is never checked — if ParseS3Key can
                    // return false without throwing, `path`/`versionString` may be invalid
                    // below. Confirm ParseS3Key's failure contract.
                    bool ok = ParseS3Key(obj.Key, out type, out path, out versionString);
                }
                catch (Exception ex)
                {
                    if (ex is ArgumentException || ex is IndexOutOfRangeException)
                    {
                        // Report error.
                        logger.Warn("Failed to parse S3 key: {0} -- Skipping.", obj.Key);
                        //logger.Log(LogLevel.Warn, ex, "Failed to parse S3 key: {0}", obj.Key);
                        //SyncAgent.Results.Stats.FailedSavedFileCount += 1;

                        // Report save progress
                        //ReportSaveProgress(SyncAgent.Results.Stats.SavedFileCount);

                        continue; // Skip this file.
                    }
                    throw;
                }

                path = StringUtils.NormalizeUsingPreferredForm(path);

                DateTime lastWrittenAt = DateTime.ParseExact(versionString, Models.BackupedFile.VersionFormat, CultureInfo.InvariantCulture);

                // Create/Update BackupPlanFile, but do not SAVE it.
                Models.BackupPlanFile entry = daoBackupPlanFile.GetByStorageAccountAndPath(account, path);
                Models.BackupedFile version = null;
                if (entry == null)
                {
                    // Create `BackupPlanFile`.
                    entry = new Models.BackupPlanFile();
                    entry.BackupPlan = null;
                    entry.StorageAccountType = account.Type;
                    entry.StorageAccount = account;
                    entry.Path = path;
                    entry.LastSize = obj.Size;
                    entry.LastWrittenAt = lastWrittenAt;
                    //entry.LastChecksum = ;
                    entry.LastStatus = Models.BackupFileStatus.UNCHANGED;
                    entry.CreatedAt = DateTime.UtcNow;

                    // Create `BackupedFile`.
                    version = new Models.BackupedFile(null, entry, Synchronization);
                    version.StorageAccountType = account.Type;
                    version.StorageAccount = account;
                    version.FileLastWrittenAt = lastWrittenAt;
                    version.FileLastChecksum = entry.LastChecksum;
                    version.FileSize = entry.LastSize;
                    version.FileStatus = Models.BackupFileStatus.MODIFIED;
                    version.TransferStatus = TransferStatus.COMPLETED;
                    version.UpdatedAt = DateTime.UtcNow;
                    entry.Versions.Add(version);
                    //daoBackupedFile.Insert(tx, version);
                }
                else
                {
                    // Update `BackupPlanFile`.
                    entry.LastSize = obj.Size;
                    entry.LastWrittenAt = lastWrittenAt;
                    //entry.LastChecksum =
                    //entry.LastStatus = Models.BackupFileStatus.MODIFIED;
                    entry.UpdatedAt = DateTime.UtcNow;

                    IList<Models.BackupedFile> versions = null;
                    try
                    {
                        versions = daoBackupedFile.GetCompletedByStorageAccountAndPath(account, path, versionString);
                    }
                    catch (FormatException)
                    {
                        // Report error.
                        logger.Warn("Failed to parse versionString: {0} -- Skipping.", versionString);
                        //SyncAgent.Results.Stats.FailedSavedFileCount += 1;
                        continue; // TODO(jweyrich): Should we abort?
                    }

                    // Check whether our database already contains this exact file + version.
                    if (versions == null || (versions != null && versions.Count == 0))
                    {
                        // Create `BackupedFile`.
                        version = new Models.BackupedFile(null, entry, Synchronization);
                        version.StorageAccountType = account.Type;
                        version.StorageAccount = account;
                        version.FileLastWrittenAt = entry.LastWrittenAt;
                        version.FileLastChecksum = entry.LastChecksum;
                        version.FileSize = entry.LastSize;
                        version.FileStatus = Models.BackupFileStatus.MODIFIED;
                        version.TransferStatus = TransferStatus.COMPLETED;
                        version.UpdatedAt = DateTime.UtcNow;
                        entry.Versions.Add(version);
                        //daoBackupedFile.Insert(tx, version);
                    }
                    else
                    {
                        // Update `BackupedFile`.
                        version = versions.First();
                        version.FileLastWrittenAt = entry.LastWrittenAt;
                        version.FileLastChecksum = entry.LastChecksum;
                        version.FileSize = entry.LastSize;
                        version.UpdatedAt = DateTime.UtcNow;
                        //daoBackupedFile.Update(tx, version);
                    }
                }

                try
                {
                    // Create path nodes and INSERT them, if they don't exist yet.
                    entry.PathNode = pathNodeCreator.CreateOrUpdatePathNodes(account, entry);

                    // Create or update `BackupPlanFile`.
                    daoBackupPlanFile.InsertOrUpdate(tx, entry);
                }
                catch (Exception ex)
                {
                    logger.Log(LogLevel.Error, ex, "BUG: Failed to insert/update {0} => {1}",
                        typeof(Models.BackupPlanFile).Name,
                        CustomJsonSerializer.SerializeObject(entry, 1));
                    logger.Error("Dump of failed object: {0}", entry.DumpMe());
                    throw;
                }

                // NOTE(review): `didCommit` is never inspected — intentional? ProcessBatch
                // presumably flushes/commits every N items (batch size 250 above).
                bool didCommit = batchProcessor.ProcessBatch(tx);

                SyncAgent.Results.Stats.SavedFileCount += 1;

                // Report save progress
                ReportSaveProgress(SyncAgent.Results.Stats.SavedFileCount);
            }

            batchProcessor.ProcessBatch(tx, true);

            // Report save progress
            ReportSaveProgress(SyncAgent.Results.Stats.SavedFileCount, true);

            stats.End();

            // ------------------------------------------------------------------------------------

            tx.Commit();
        }
        catch (OperationCanceledException)
        {
            tx.Rollback(); // Rollback the transaction
            throw;
        }
        catch (Exception ex)
        {
            logger.Log(LogLevel.Error, ex, "Caught exception");
            tx.Rollback(); // Rollback the transaction
            throw;
        }
        finally
        {
            //session.Close();
            if (session.IsConnected)
            {
                session.Disconnect();
            }
        }
    }
}
//
// Summary:
// 1. Create `RestorePlanFile`s and `RestoredFile`s as necessary and add them to the `Restore`.
// 2. Insert/Update `Restore` and its `RestoredFile`s into the database, also saving
//    the `RestorePlanFile`s instances that may have been changed by step 1.2.
// 3. Create versioned files and remove files that won't belong to this restore.
//
public void Save()
{
    Assert.IsFalse(IsSaved);

    ISession session = NHibernateHelper.GetSession();

    RestoreRepository daoRestore = new RestoreRepository(session);
    RestorePlanFileRepository daoRestorePlanFile = new RestorePlanFileRepository(session);
    RestoredFileRepository daoRestoredFile = new RestoredFileRepository(session);
    // FIX: removed an unused `BackupPlanPathNodeRepository` local — this method never touches path nodes.

    var FilesToTrack = SuppliedFiles;
    var FilesToInsertOrUpdate = FilesToTrack;

    BlockPerfStats stats = new BlockPerfStats();

    using (ITransaction tx = session.BeginTransaction())
    {
        try
        {
            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 1");

            // 1. Create `RestorePlanFile`s and `RestoredFile`s as necessary and add them to the `Restore`.
            foreach (Models.RestorePlanFile entry in FilesToInsertOrUpdate)
            {
                // Throw if the operation was canceled.
                CancellationToken.ThrowIfCancellationRequested();

                // 1.1 - Insert/Update RestorePlanFile's and RestoredFile's if they don't exist yet.
                // IMPORTANT: It's important that we guarantee the referenced `RestorePlanFile` has a valid `Id`
                // before we reference it elsewhere, otherwise NHibernate won't have a valid value to put on
                // the `restore_plan_file_id` column.
                daoRestorePlanFile.InsertOrUpdate(tx, entry); // Guarantee it's saved

                Models.RestoredFile restoredFile = daoRestoredFile.GetByRestoreAndPath(Restore, entry.Path);
                if (restoredFile == null) // If we're resuming, this should already exist.
                {
                    // Create `RestoredFile`.
                    // NOTE(review): the `as` cast yields null when UserData is not a
                    // BackupedFile — confirm the RestoredFile constructor accepts null here.
                    Models.BackupedFile backupedFile = entry.VersionedFile.UserData as Models.BackupedFile;
                    restoredFile = new Models.RestoredFile(Restore, entry, backupedFile);
                }
                restoredFile.UpdatedAt = DateTime.UtcNow;
                daoRestoredFile.InsertOrUpdate(tx, restoredFile);

                Restore.Files.Add(restoredFile);
                //daoRestore.Update(tx, Restore);

                ProcessBatch(session);
            }
            ProcessBatch(session, true);
            stats.End();

            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 2");

            // 2. Insert/Update `Restore` and its `RestoredFile`s into the database, also saving
            //    the `RestorePlanFile`s instances that may have been changed by step 1.2.
            {
                daoRestore.Update(tx, Restore);
            }
            ProcessBatch(session, true);
            stats.End();

            // ------------------------------------------------------------------------------------

            tx.Commit();
        }
        catch (OperationCanceledException)
        {
            tx.Rollback(); // Rollback the transaction
            throw;
        }
        catch (Exception)
        {
            tx.Rollback(); // Rollback the transaction
            throw;
        }
        finally
        {
            //session.Close();
            if (session.IsConnected)
            {
                session.Disconnect();
            }
        }
    }

    IsSaved = true;

    // 3. Create versioned files and remove files that won't belong to this restore.
    TransferSet.Files = GetFilesToTransfer(Restore, SuppliedFiles);
}
/// <summary>
/// Expands the checked datasource entries into a dictionary that also contains
/// every ancestor node (parents of files/folders/versions), then attaches the
/// matching persisted <c>BackupPlanPathNode</c> to every entry that doesn't
/// have one yet.
/// </summary>
/// <param name="dict">Checked entries keyed by node key; may be null.</param>
/// <returns>The expanded dictionary, or null when <paramref name="dict"/> is null.</returns>
private Dictionary<string, BackupPlanTreeNodeData> ExpandCheckedDataSource(
    Dictionary<string, BackupPlanTreeNodeData> dict)
{
    if (dict == null)
        return null;

    Dictionary<string, BackupPlanTreeNodeData> expandedDict =
        new Dictionary<string, BackupPlanTreeNodeData>(dict.Count * 2);

    bool hasParents = false;

    // Expand paths into their respective parts.
    foreach (var obj in dict)
    {
        // Only DRIVE nodes are roots; everything else has ancestors to expand.
        switch (obj.Value.Type)
        {
            default:
                throw new ArgumentException("Unhandled TypeEnum", "obj.Value.Type");
            case TypeEnum.FILE_VERSION:
            case TypeEnum.FILE:
            case TypeEnum.FOLDER:
                hasParents = true;
                break;
            case TypeEnum.DRIVE:
                hasParents = false;
                break;
        }

        if (obj.Value.InfoObject == null)
            obj.Value.InfoObject = new EntryInfo(obj.Value.Type, obj.Value.Name, obj.Value.Path, obj.Value.Version);

        string nodeKey = BuildNodeKey(obj.Value, obj.Value.Version);
        if (!expandedDict.ContainsKey(nodeKey))
        {
            expandedDict.Add(nodeKey, obj.Value);
            if (hasParents)
            {
                if (obj.Value.Type == TypeEnum.FILE_VERSION)
                    ExpandCheckedDataSourceFileVersionNode(expandedDict, obj.Value);
                ExpandCheckedDataSourceAddParents(expandedDict, obj.Value.Path);
            }
        }
    }

    // Load all respective `BackupPlanPathNode`s.
    // FIX: the repository is loop-invariant — construct it once instead of once per node.
    BackupPlanPathNodeRepository daoPathNode = new BackupPlanPathNodeRepository();
    foreach (var obj in expandedDict)
    {
        BackupPlanTreeNodeData nodeData = obj.Value;
        if (nodeData.UserObject == null)
        {
            Models.EntryType nodeType = Models.EntryTypeExtensions.ToEntryType(nodeData.Type);
            // A FILE_VERSION node maps to the FILE entry in the path-node table.
            if (nodeData.Type == TypeEnum.FILE_VERSION)
                nodeType = Models.EntryType.FILE;
            nodeData.UserObject = daoPathNode.GetByStorageAccountAndTypeAndPath(StorageAccount, nodeType, nodeData.Path);
        }
    }

    return expandedDict;
}