public async Task<FileVersionerResults> DoVersion(Models.Backup backup, LinkedList<string> filePaths, bool newVersion)
{
    Assert.IsNotNull(backup);
    Assert.AreEqual(TransferStatus.RUNNING, backup.Status);
    Assert.IsNotNull(filePaths);

    Results.Reset();

    await ExecuteOnBackround(() =>
    {
        ISession session = NHibernateHelper.GetSession();
        try
        {
            BackupRepository daoBackup = new BackupRepository(session);
            BackupPlanFileRepository daoBackupPlanFile = new BackupPlanFileRepository(session);

            Backup = daoBackup.Get(backup.Id);
            IList<Models.BackupPlanFile> list = newVersion
                ? daoBackupPlanFile.GetAllByBackupPlan(Backup.BackupPlan)
                : daoBackupPlanFile.GetAllPendingByBackup(Backup);
            AllFilesFromPlan = list.ToDictionary<Models.BackupPlanFile, string>(p => p.Path);

            Execute(Backup, filePaths, newVersion);

            Save(session);
        }
        catch (Exception ex)
        {
            string message = string.Format("File versioning FAILED with an exception: {0}", ex.Message);
            Results.OnError(this, message);
            logger.Log(LogLevel.Error, ex, message);
            throw; // Use `throw` instead of `throw ex` to preserve the original stack trace.
        }
        finally
        {
            //session.Close();
            if (session.IsConnected)
                session.Disconnect();
        }
    }, CancellationToken);

    return Results;
}
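// `ExecuteOnBackround` (spelling kept as in the original source) is defined elsewhere in
// this class. A minimal sketch of what such a helper might look like, assuming it merely
// offloads the delegate to the thread pool via Task.Run with the given cancellation token;
// this is a hypothetical implementation, not the project's actual code:
private Task ExecuteOnBackround(Action action, CancellationToken cancellationToken)
{
    // Task.Run(Action, CancellationToken) schedules the delegate on the thread pool and
    // refuses to start it if cancellation was already requested.
    return Task.Run(action, cancellationToken);
}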
// Update specific `BackupPlanFile`s that exist and are NOT yet associated with a `BackupPlan`.
protected void DoUpdateSyncedFiles(Models.Backup backup, LinkedList<string> filesToProcess)
{
    BackupPlanFileRepository dao = new BackupPlanFileRepository();

    long totalUpdates = 0;
    foreach (var path in filesToProcess)
    {
        // There's NO NEED to SELECT and then UPDATE when we can UPDATE directly using a WHERE clause.
        totalUpdates += dao.AssociateSyncedFileToBackupPlan(backup.BackupPlan, path);
    }

    if (totalUpdates > 0)
        logger.Info("Associated {0} synced files to Backup Plan {1}", totalUpdates, backup.BackupPlan.Name);
    else
        logger.Info("There are no synced files to associate to Backup Plan {0}", backup.BackupPlan.Name);
}
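// The repository method used above is defined elsewhere. A minimal sketch of how
// `AssociateSyncedFileToBackupPlan` might issue the direct UPDATE the comment refers to,
// assuming an NHibernate HQL bulk update; the `Session` property and the entity/property
// names are assumptions based on the models used in this file:
public int AssociateSyncedFileToBackupPlan(Models.BackupPlan plan, string path)
{
    // Associate in a single statement, and only if the file is not associated yet.
    return Session.CreateQuery(
            "update BackupPlanFile set BackupPlan = :plan" +
            " where BackupPlan is null and Path = :path")
        .SetParameter("plan", plan)
        .SetParameter("path", path)
        .ExecuteUpdate(); // Returns the number of affected rows.
}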
private void Save(ISession session)
{
    Assert.IsFalse(IsSaved);

    BatchProcessor batchProcessor = new BatchProcessor();
    BackupRepository daoBackup = new BackupRepository(session);
    BackupPlanFileRepository daoBackupPlanFile = new BackupPlanFileRepository(session);
    BackupedFileRepository daoBackupedFile = new BackupedFileRepository(session);
    BackupPlanPathNodeRepository daoBackupPlanPathNode = new BackupPlanPathNodeRepository(session);

#if false
    var FilesToTrack = SuppliedFiles.Union(ChangeSet.DeletedFiles);
    var FilesToInsertOrUpdate =
        from f in FilesToTrack
        where
            // Keep it so we'll later add or update a `BackupedFile`.
            (f.LastStatus == Models.BackupFileStatus.ADDED || f.LastStatus == Models.BackupFileStatus.MODIFIED)
            // Keep it if `LastStatus` is different from `PreviousLastStatus`.
            || ((f.LastStatus == Models.BackupFileStatus.REMOVED || f.LastStatus == Models.BackupFileStatus.DELETED)
                && (f.LastStatus != f.PreviousLastStatus))
            // Skip all UNCHANGED files.
        select f;
#else
    var FilesToTrack = SuppliedFiles;
    var FilesToInsertOrUpdate =
        from f in FilesToTrack
        where
            // Keep it so we'll later add or update a `BackupedFile`.
            (f.LastStatus == Models.BackupFileStatus.ADDED || f.LastStatus == Models.BackupFileStatus.MODIFIED)
            // Skip all UNCHANGED/DELETED/REMOVED files.
        select f;
#endif

    BlockPerfStats stats = new BlockPerfStats();

    using (ITransaction tx = session.BeginTransaction())
    {
        try
        {
            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 1");

            BackupPlanPathNodeCreator pathNodeCreator = new BackupPlanPathNodeCreator(daoBackupPlanPathNode, tx);

            // 1 - Split each path into its components and INSERT new path nodes if they don't exist yet.
            foreach (Models.BackupPlanFile entry in FilesToInsertOrUpdate)
            {
                // Throw if the operation was canceled.
                CancellationToken.ThrowIfCancellationRequested();

                try
                {
                    entry.PathNode = pathNodeCreator.CreateOrUpdatePathNodes(Backup.BackupPlan.StorageAccount, entry);
                }
                catch (Exception ex)
                {
                    string message = string.Format("BUG: Failed to create/update {0} => {1}",
                        typeof(Models.BackupPlanPathNode).Name,
                        CustomJsonSerializer.SerializeObject(entry, 1));
                    Results.OnError(this, message);
                    logger.Log(LogLevel.Error, ex, message);
                    throw;
                }

                batchProcessor.ProcessBatch(session);
            }
            batchProcessor.ProcessBatch(session, true);
            stats.End();

            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 2");

            // 2 - Insert/Update `BackupPlanFile`s as necessary.
            foreach (Models.BackupPlanFile entry in FilesToInsertOrUpdate)
            {
                // Throw if the operation was canceled.
                CancellationToken.ThrowIfCancellationRequested();

                // IMPORTANT: Guarantee that the referenced `BackupPlanFile` has a valid `Id` before
                // referencing it elsewhere; otherwise NHibernate won't have a valid value to put in
                // the `backup_plan_file_id` column.
                try
                {
                    daoBackupPlanFile.InsertOrUpdate(tx, entry); // Guarantee it's saved.
                }
                catch (Exception ex)
                {
                    string message = string.Format("BUG: Failed to insert/update {0} => {1}",
                        typeof(Models.BackupPlanFile).Name,
                        CustomJsonSerializer.SerializeObject(entry, 1));
                    Results.OnError(this, message);
                    logger.Log(LogLevel.Error, ex, message);
                    logger.Error("Dump of failed object: {0}", entry.DumpMe());
                    throw;
                }

                batchProcessor.ProcessBatch(session);
            }
            batchProcessor.ProcessBatch(session, true);
            stats.End();

            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 3");

            // 3 - Insert/Update `BackupedFile`s as necessary and add them to the `Backup`.
            //List<Models.BackupedFile> backupedFiles = new List<Models.BackupedFile>(FilesToInsertOrUpdate.Count());
            foreach (Models.BackupPlanFile entry in FilesToInsertOrUpdate)
            {
                // Throw if the operation was canceled.
                CancellationToken.ThrowIfCancellationRequested();

                Models.BackupedFile backupedFile = daoBackupedFile.GetByBackupAndPath(Backup, entry.Path);
                if (backupedFile == null) // If we're resuming, this should already exist.
                {
                    // Create `BackupedFile`.
                    backupedFile = new Models.BackupedFile(Backup, entry);
                }
                backupedFile.FileSize = entry.LastSize;
                backupedFile.FileStatus = entry.LastStatus;
                backupedFile.FileLastWrittenAt = entry.LastWrittenAt;
                backupedFile.FileLastChecksum = entry.LastChecksum;
                switch (entry.LastStatus)
                {
                    default:
                        backupedFile.TransferStatus = default(TransferStatus);
                        break;
                    case Models.BackupFileStatus.REMOVED:
                    case Models.BackupFileStatus.DELETED:
                        backupedFile.TransferStatus = TransferStatus.COMPLETED;
                        break;
                }
                backupedFile.UpdatedAt = DateTime.UtcNow;

                try
                {
                    daoBackupedFile.InsertOrUpdate(tx, backupedFile);
                }
                catch (Exception ex)
                {
                    logger.Log(LogLevel.Error, ex, "BUG: Failed to insert/update {0} => {1}",
                        typeof(Models.BackupedFile).Name,
                        CustomJsonSerializer.SerializeObject(backupedFile, 1));
                    throw;
                }

                //backupedFiles.Add(backupedFile);

                batchProcessor.ProcessBatch(session);
            }
            batchProcessor.ProcessBatch(session, true);
            stats.End();

            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 4");

            // 4 - Update all `BackupPlanFile`s that already exist for the backup plan associated
            //     with this backup operation.
            {
                var AllFilesFromPlanThatWerentUpdatedYet = AllFilesFromPlan.Values.Except(FilesToInsertOrUpdate);
                foreach (Models.BackupPlanFile file in AllFilesFromPlanThatWerentUpdatedYet)
                {
                    // Throw if the operation was canceled.
                    CancellationToken.ThrowIfCancellationRequested();

                    //Console.WriteLine("2: {0}", file.Path);
                    try
                    {
                        daoBackupPlanFile.Update(tx, file);
                    }
                    catch (Exception ex)
                    {
                        string message = string.Format("BUG: Failed to update {0} => {1}",
                            typeof(Models.BackupPlanFile).Name,
                            CustomJsonSerializer.SerializeObject(file, 1));
                        Results.OnError(this, message);
                        logger.Log(LogLevel.Error, ex, message);
                        throw;
                    }

                    batchProcessor.ProcessBatch(session);
                }
            }
            batchProcessor.ProcessBatch(session, true);
            stats.End();

            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 5");

            // 5 - Insert/Update `Backup` and its `BackupedFile`s into the database, also saving
            //     the `BackupPlanFile` instances that may have been changed by step 2.
            {
                //foreach (var bf in backupedFiles)
                //{
                //    // Throw if the operation was canceled.
                //    CancellationToken.ThrowIfCancellationRequested();
                //
                //    Backup.Files.Add(bf);
                //
                //    ProcessBatch(session);
                //}

                try
                {
                    daoBackup.Update(tx, Backup);
                }
                catch (Exception ex)
                {
                    string message = string.Format("BUG: Failed to update {0} => {1}",
                        typeof(Models.Backup).Name,
                        CustomJsonSerializer.SerializeObject(Backup, 1));
                    Results.OnError(this, message);
                    logger.Log(LogLevel.Error, ex, message);
                    throw;
                }
            }
            batchProcessor.ProcessBatch(session, true);
            stats.End();

            // ------------------------------------------------------------------------------------

            tx.Commit();
        }
        catch (OperationCanceledException)
        {
            string message = "Operation canceled";
            Results.OnError(this, message);
            logger.Warn(message);
            tx.Rollback(); // Rollback the transaction.
            throw;
        }
        catch (Exception ex)
        {
            string message = string.Format("Caught exception: {0}", ex.Message);
            Results.OnError(this, message);
            logger.Log(LogLevel.Error, ex, message);
            tx.Rollback(); // Rollback the transaction.
            throw;
        }
        finally
        {
            // ...
        }
    }

    IsSaved = true;

    // 6 - Create versioned files and remove files that won't belong to this backup.
    TransferSet.Files = GetFilesToTransfer(Backup, SuppliedFiles);

    // Sanity check: every ADDED/MODIFIED file must be scheduled for transfer.
    {
        var transferCount = TransferSet.Files.Count();
        var filesCount = ChangeSet.AddedFiles.Count() + ChangeSet.ModifiedFiles.Count();

        Assert.IsTrue(transferCount == filesCount,
            "TransferSet.Files count must equal ChangeSet.AddedFiles count + ChangeSet.ModifiedFiles count");
    }
}
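// `BatchProcessor` is defined elsewhere. A minimal sketch of the batching idea behind the
// `ProcessBatch(session)` / `ProcessBatch(session, true)` calls above, assuming it flushes
// and clears the NHibernate first-level cache every N operations so memory stays bounded.
// This is a hypothetical implementation covering only the ISession overload (the sync
// variant below also calls it with a `BatchTransaction`):
public class BatchProcessor
{
    private readonly int _batchSize;
    private int _pending;

    public BatchProcessor(int batchSize = 100)
    {
        _batchSize = batchSize;
    }

    // Returns true if this call actually flushed a batch.
    public bool ProcessBatch(ISession session, bool force = false)
    {
        _pending++;
        if (!force && _pending < _batchSize)
            return false;

        session.Flush(); // Push pending INSERTs/UPDATEs to the database.
        session.Clear(); // Evict tracked entities to release memory.
                         // NOTE: Clear() detaches all loaded entities; the real
                         // implementation may be more selective.
        _pending = 0;
        return true;
    }
}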
// Summary:
//     Saves all instances from the RemoteObjects list to the database.
//     Also removes them from the RemoteObjects list to free memory.
private void Save(CancellationToken CancellationToken)
{
    ISession session = NHibernateHelper.GetSession();

    BatchProcessor batchProcessor = new BatchProcessor(250);
    StorageAccountRepository daoStorageAccount = new StorageAccountRepository(session);
    BackupPlanFileRepository daoBackupPlanFile = new BackupPlanFileRepository(session);
    BackupPlanPathNodeRepository daoBackupPlanPathNode = new BackupPlanPathNodeRepository(session);
    BackupedFileRepository daoBackupedFile = new BackupedFileRepository(session);

    BlockPerfStats stats = new BlockPerfStats();

    using (BatchTransaction tx = batchProcessor.BeginTransaction(session))
    {
        try
        {
            // ------------------------------------------------------------------------------------
            Models.StorageAccount account = daoStorageAccount.Get(Synchronization.StorageAccount.Id);

            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 1");

            BackupPlanPathNodeCreator pathNodeCreator = new BackupPlanPathNodeCreator(daoBackupPlanPathNode, tx);

            // Report save progress.
            ReportSaveProgress(SyncAgent.Results.Stats.SavedFileCount, true);

            // Saving loop. Iterate backwards so items can be removed as we go.
            for (int i = RemoteObjects.Count - 1; i >= 0; i--)
            {
                ListingObject obj = RemoteObjects[i]; // Get instance of object.
                //RemoteObjects[i] = null;
                RemoteObjects.RemoveAt(i); // Remove to free memory. RemoveAt(int) is O(N).

                // Throw if the operation was canceled.
                CancellationToken.ThrowIfCancellationRequested();

                Models.EntryType type;
                string path = string.Empty;
                string versionString = string.Empty;
                try
                {
                    // Parse obj.Key into its relevant parts.
                    bool ok = ParseS3Key(obj.Key, out type, out path, out versionString);
                }
                catch (Exception ex)
                {
                    if (ex is ArgumentException || ex is IndexOutOfRangeException)
                    {
                        // Report error.
                        logger.Warn("Failed to parse S3 key: {0} -- Skipping.", obj.Key);
                        //logger.Log(LogLevel.Warn, ex, "Failed to parse S3 key: {0}", obj.Key);
                        //SyncAgent.Results.Stats.FailedSavedFileCount += 1;

                        // Report save progress.
                        //ReportSaveProgress(SyncAgent.Results.Stats.SavedFileCount);

                        continue; // Skip this file.
                    }
                    throw;
                }

                path = StringUtils.NormalizeUsingPreferredForm(path);

                DateTime lastWrittenAt = DateTime.ParseExact(versionString,
                    Models.BackupedFile.VersionFormat, CultureInfo.InvariantCulture);

                // Create/Update the `BackupPlanFile`, but do NOT save it yet.
                Models.BackupPlanFile entry = daoBackupPlanFile.GetByStorageAccountAndPath(account, path);
                Models.BackupedFile version = null;
                if (entry == null)
                {
                    // Create `BackupPlanFile`.
                    entry = new Models.BackupPlanFile();
                    entry.BackupPlan = null;
                    entry.StorageAccountType = account.Type;
                    entry.StorageAccount = account;
                    entry.Path = path;
                    entry.LastSize = obj.Size;
                    entry.LastWrittenAt = lastWrittenAt;
                    //entry.LastChecksum = ;
                    entry.LastStatus = Models.BackupFileStatus.UNCHANGED;
                    entry.CreatedAt = DateTime.UtcNow;

                    // Create `BackupedFile`.
                    version = new Models.BackupedFile(null, entry, Synchronization);
                    version.StorageAccountType = account.Type;
                    version.StorageAccount = account;
                    version.FileLastWrittenAt = lastWrittenAt;
                    version.FileLastChecksum = entry.LastChecksum;
                    version.FileSize = entry.LastSize;
                    version.FileStatus = Models.BackupFileStatus.MODIFIED;
                    version.TransferStatus = TransferStatus.COMPLETED;
                    version.UpdatedAt = DateTime.UtcNow;

                    entry.Versions.Add(version);
                    //daoBackupedFile.Insert(tx, version);
                }
                else
                {
                    // Update `BackupPlanFile`.
                    entry.LastSize = obj.Size;
                    entry.LastWrittenAt = lastWrittenAt;
                    //entry.LastChecksum =
                    //entry.LastStatus = Models.BackupFileStatus.MODIFIED;
                    entry.UpdatedAt = DateTime.UtcNow;

                    IList<Models.BackupedFile> versions = null;
                    try
                    {
                        versions = daoBackupedFile.GetCompletedByStorageAccountAndPath(account, path, versionString);
                    }
                    catch (FormatException)
                    {
                        // Report error.
                        logger.Warn("Failed to parse versionString: {0} -- Skipping.", versionString);
                        //SyncAgent.Results.Stats.FailedSavedFileCount += 1;
                        continue; // TODO(jweyrich): Should we abort?
                    }

                    // Check whether our database already contains this exact file + version.
                    if (versions == null || versions.Count == 0)
                    {
                        // Create `BackupedFile`.
                        version = new Models.BackupedFile(null, entry, Synchronization);
                        version.StorageAccountType = account.Type;
                        version.StorageAccount = account;
                        version.FileLastWrittenAt = entry.LastWrittenAt;
                        version.FileLastChecksum = entry.LastChecksum;
                        version.FileSize = entry.LastSize;
                        version.FileStatus = Models.BackupFileStatus.MODIFIED;
                        version.TransferStatus = TransferStatus.COMPLETED;
                        version.UpdatedAt = DateTime.UtcNow;

                        entry.Versions.Add(version);
                        //daoBackupedFile.Insert(tx, version);
                    }
                    else
                    {
                        // Update `BackupedFile`.
                        version = versions.First();
                        version.FileLastWrittenAt = entry.LastWrittenAt;
                        version.FileLastChecksum = entry.LastChecksum;
                        version.FileSize = entry.LastSize;
                        version.UpdatedAt = DateTime.UtcNow;
                        //daoBackupedFile.Update(tx, version);
                    }
                }

                try
                {
                    // Create path nodes and INSERT them, if they don't exist yet.
                    entry.PathNode = pathNodeCreator.CreateOrUpdatePathNodes(account, entry);

                    // Create or update the `BackupPlanFile`.
                    daoBackupPlanFile.InsertOrUpdate(tx, entry);
                }
                catch (Exception ex)
                {
                    logger.Log(LogLevel.Error, ex, "BUG: Failed to insert/update {0} => {1}",
                        typeof(Models.BackupPlanFile).Name,
                        CustomJsonSerializer.SerializeObject(entry, 1));
                    logger.Error("Dump of failed object: {0}", entry.DumpMe());
                    throw;
                }

                bool didCommit = batchProcessor.ProcessBatch(tx);

                SyncAgent.Results.Stats.SavedFileCount += 1;

                // Report save progress.
                ReportSaveProgress(SyncAgent.Results.Stats.SavedFileCount);
            }

            batchProcessor.ProcessBatch(tx, true);

            // Report save progress.
            ReportSaveProgress(SyncAgent.Results.Stats.SavedFileCount, true);

            stats.End();

            // ------------------------------------------------------------------------------------

            tx.Commit();
        }
        catch (OperationCanceledException)
        {
            tx.Rollback(); // Rollback the transaction.
            throw;
        }
        catch (Exception ex)
        {
            logger.Log(LogLevel.Error, ex, "Caught exception");
            tx.Rollback(); // Rollback the transaction.
            throw;
        }
        finally
        {
            //session.Close();
            if (session.IsConnected)
                session.Disconnect();
        }
    }
}
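// `ParseS3Key` is defined elsewhere. The real S3 key layout is project-specific, so the
// following is only an illustration of the out-parameter contract used above, assuming a
// hypothetical "<type>/<version>/<path...>" layout. It throws the same exception types the
// caller catches (ArgumentException, IndexOutOfRangeException) on malformed keys:
private bool ParseS3Key(string key, out Models.EntryType type, out string path, out string versionString)
{
    if (string.IsNullOrEmpty(key))
        throw new ArgumentException("key must not be null or empty", "key");

    string[] parts = key.Split(new[] { '/' }, 3);
    // Enum.Parse throws ArgumentException if the name is unknown.
    type = (Models.EntryType)Enum.Parse(typeof(Models.EntryType), parts[0], true);
    // Accessing parts[1]/parts[2] throws IndexOutOfRangeException if the key has too few segments.
    versionString = parts[1];
    path = parts[2];
    return true;
}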