public Models.BackupPlanPathNode CreateOrUpdatePathNodes(Models.StorageAccount account, Models.BackupPlanFile file)
{
    PathNodes pathNodes = new PathNodes(file.Path);

    bool nodeExists = true; // Start assuming it exists.
    Models.BackupPlanPathNode previousNode = null;
    Models.BackupPlanPathNode planPathNode = null;

    foreach (var pathNode in pathNodes.Nodes)
    {
        // If it does not exist, it does not make sense to look up inner directories/files.
        if (nodeExists)
        {
            planPathNode = _dao.GetByStorageAccountAndTypeAndPath(
                account, Models.EntryTypeExtensions.ToEntryType(pathNode.Type), pathNode.Path);

            // If we couldn't find the current `Models.BackupPlanPathNode`, it's safe to assume the inner
            // directories/files don't exist either. From now on, all nodes will be created/inserted.
            if (planPathNode == null)
                nodeExists = false;
        }

        if (!nodeExists)
        {
            planPathNode = new Models.BackupPlanPathNode(file,
                Models.EntryTypeExtensions.ToEntryType(pathNode.Type),
                pathNode.Name, pathNode.Path, previousNode);

            if (previousNode != null)
            {
                planPathNode.Parent = previousNode;
                previousNode.SubNodes.Add(planPathNode);
            }

            _dao.Insert(_tx, planPathNode);
            _dao.Refresh(planPathNode);
        }

        previousNode = planPathNode;
    }

    // After the loop, `previousNode` is the leaf node, which corresponds to the file itself.
    return previousNode;
}
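// For context: CreateOrUpdatePathNodes walks the file's path root-first, reusing
// nodes that already exist and inserting the rest, so every parent is persisted
// before its children. Below is a minimal standalone sketch of the decomposition
// that `PathNodes` presumably performs -- the `NodeType` enum and `Split` helper
// are hypothetical illustrations, not the actual PathNodes implementation.

using System;
using System.Collections.Generic;

// Hypothetical stand-in for PathNodes: decomposes an absolute Windows path into
// one node per component, root first, so parents can be created before children.
static class PathDecompositionSketch
{
    public enum NodeType { Drive, Folder, File }

    public static IEnumerable<(NodeType Type, string Name, string Path)> Split(string filePath)
    {
        string[] parts = filePath.Split('\\');
        string accumulated = string.Empty;
        for (int i = 0; i < parts.Length; i++)
        {
            // Keep a trailing separator on intermediate components so each node
            // carries the full path from the root down to itself.
            accumulated += parts[i] + (i < parts.Length - 1 ? "\\" : string.Empty);
            NodeType type = i == 0 ? NodeType.Drive
                          : i == parts.Length - 1 ? NodeType.File
                          : NodeType.Folder;
            yield return (type, parts[i], accumulated);
        }
    }

    static void Main()
    {
        foreach (var node in Split(@"C:\photos\2014\trip.jpg"))
            Console.WriteLine("{0,-6} {1,-10} {2}", node.Type, node.Name, node.Path);
        // Drive  C:         C:\
        // Folder photos     C:\photos\
        // Folder 2014       C:\photos\2014\
        // File   trip.jpg   C:\photos\2014\trip.jpg
    }
}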
public BackupPlanFile(StorageAccount account, string path)
    : this(account)
{
    _Path = path;
}

public BackupPlanFile(StorageAccount account)
    : this()
{
    _StorageAccountType = account.Type;
    _StorageAccount = account;
}
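// The chained constructors run base-most first: `this()` sets defaults, then the
// account constructor copies the account and its type, then the path constructor
// sets the path. A hypothetical self-contained demo of that evaluation order:

using System;

class ChainingDemo
{
    public ChainingDemo() { Console.WriteLine("parameterless: set defaults"); }
    public ChainingDemo(string account) : this() { Console.WriteLine("account ctor: copy account fields"); }
    public ChainingDemo(string account, string path) : this(account) { Console.WriteLine("path ctor: set _Path"); }

    static void Main()
    {
        new ChainingDemo("acct", @"C:\file.txt");
        // Output:
        // parameterless: set defaults
        // account ctor: copy account fields
        // path ctor: set _Path
    }
}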
/// <summary>
/// Saves all instances from the RemoteObjects list to the database.
/// Also removes them from the RemoteObjects list to free memory.
/// </summary>
private void Save(CancellationToken cancellationToken)
{
    ISession session = NHibernateHelper.GetSession();
    BatchProcessor batchProcessor = new BatchProcessor(250);

    StorageAccountRepository daoStorageAccount = new StorageAccountRepository(session);
    BackupPlanFileRepository daoBackupPlanFile = new BackupPlanFileRepository(session);
    BackupPlanPathNodeRepository daoBackupPlanPathNode = new BackupPlanPathNodeRepository(session);
    BackupedFileRepository daoBackupedFile = new BackupedFileRepository(session);

    BlockPerfStats stats = new BlockPerfStats();

    using (BatchTransaction tx = batchProcessor.BeginTransaction(session))
    {
        try
        {
            // ------------------------------------------------------------------------------------

            Models.StorageAccount account = daoStorageAccount.Get(Synchronization.StorageAccount.Id);

            // ------------------------------------------------------------------------------------

            stats.Begin("STEP 1");

            BackupPlanPathNodeCreator pathNodeCreator = new BackupPlanPathNodeCreator(daoBackupPlanPathNode, tx);

            // Report save progress.
            ReportSaveProgress(SyncAgent.Results.Stats.SavedFileCount, true);

            // Saving loop. Iterate backwards so RemoveAt(int) doesn't shift the items we still have to visit.
            for (int i = RemoteObjects.Count - 1; i >= 0; i--)
            {
                ListingObject obj = RemoteObjects[i]; // Get instance of object.
                RemoteObjects.RemoveAt(i); // Remove to free memory. RemoveAt(int) is O(N).

                // Throw if the operation was canceled.
                cancellationToken.ThrowIfCancellationRequested();

                Models.EntryType type;
                string path = string.Empty;
                string versionString = string.Empty;
                try
                {
                    // Parse obj.Key into its relevant parts.
                    ParseS3Key(obj.Key, out type, out path, out versionString);
                }
                catch (Exception ex)
                {
                    if (ex is ArgumentException || ex is IndexOutOfRangeException)
                    {
                        // Report error.
                        logger.Warn("Failed to parse S3 key: {0} -- Skipping.", obj.Key);
                        continue; // Skip this file.
                    }
                    throw;
                }

                path = StringUtils.NormalizeUsingPreferredForm(path);

                DateTime lastWrittenAt = DateTime.ParseExact(versionString,
                    Models.BackupedFile.VersionFormat, CultureInfo.InvariantCulture);

                // Create/update the `BackupPlanFile`, but do not SAVE it yet.
                Models.BackupPlanFile entry = daoBackupPlanFile.GetByStorageAccountAndPath(account, path);
                Models.BackupedFile version = null;
                if (entry == null)
                {
                    // Create `BackupPlanFile`.
                    entry = new Models.BackupPlanFile();
                    entry.BackupPlan = null;
                    entry.StorageAccountType = account.Type;
                    entry.StorageAccount = account;
                    entry.Path = path;
                    entry.LastSize = obj.Size;
                    entry.LastWrittenAt = lastWrittenAt;
                    entry.LastStatus = Models.BackupFileStatus.UNCHANGED;
                    entry.CreatedAt = DateTime.UtcNow;

                    // Create `BackupedFile`.
                    version = new Models.BackupedFile(null, entry, Synchronization);
                    version.StorageAccountType = account.Type;
                    version.StorageAccount = account;
                    version.FileLastWrittenAt = lastWrittenAt;
                    version.FileLastChecksum = entry.LastChecksum;
                    version.FileSize = entry.LastSize;
                    version.FileStatus = Models.BackupFileStatus.MODIFIED;
                    version.TransferStatus = TransferStatus.COMPLETED;
                    version.UpdatedAt = DateTime.UtcNow;

                    entry.Versions.Add(version);
                }
                else
                {
                    // Update `BackupPlanFile`.
                    entry.LastSize = obj.Size;
                    entry.LastWrittenAt = lastWrittenAt;
                    entry.UpdatedAt = DateTime.UtcNow;

                    IList<Models.BackupedFile> versions = null;
                    try
                    {
                        versions = daoBackupedFile.GetCompletedByStorageAccountAndPath(account, path, versionString);
                    }
                    catch (FormatException)
                    {
                        // Report error.
                        logger.Warn("Failed to parse versionString: {0} -- Skipping.", versionString);
                        continue; // TODO(jweyrich): Should we abort?
                    }

                    // Check whether our database already contains this exact file + version.
                    if (versions == null || versions.Count == 0)
                    {
                        // Create `BackupedFile`.
                        version = new Models.BackupedFile(null, entry, Synchronization);
                        version.StorageAccountType = account.Type;
                        version.StorageAccount = account;
                        version.FileLastWrittenAt = entry.LastWrittenAt;
                        version.FileLastChecksum = entry.LastChecksum;
                        version.FileSize = entry.LastSize;
                        version.FileStatus = Models.BackupFileStatus.MODIFIED;
                        version.TransferStatus = TransferStatus.COMPLETED;
                        version.UpdatedAt = DateTime.UtcNow;

                        entry.Versions.Add(version);
                    }
                    else
                    {
                        // Update `BackupedFile`.
                        version = versions.First();
                        version.FileLastWrittenAt = entry.LastWrittenAt;
                        version.FileLastChecksum = entry.LastChecksum;
                        version.FileSize = entry.LastSize;
                        version.UpdatedAt = DateTime.UtcNow;
                    }
                }

                try
                {
                    // Create path nodes and INSERT them, if they don't exist yet.
                    entry.PathNode = pathNodeCreator.CreateOrUpdatePathNodes(account, entry);

                    // Create or update `BackupPlanFile`.
                    daoBackupPlanFile.InsertOrUpdate(tx, entry);
                }
                catch (Exception ex)
                {
                    logger.Log(LogLevel.Error, ex, "BUG: Failed to insert/update {0} => {1}",
                        typeof(Models.BackupPlanFile).Name,
                        CustomJsonSerializer.SerializeObject(entry, 1));
                    logger.Error("Dump of failed object: {0}", entry.DumpMe());
                    throw;
                }

                batchProcessor.ProcessBatch(tx);

                SyncAgent.Results.Stats.SavedFileCount += 1;

                // Report save progress.
                ReportSaveProgress(SyncAgent.Results.Stats.SavedFileCount);
            }

            batchProcessor.ProcessBatch(tx, true); // Force a final flush of any pending batch.

            // Report save progress.
            ReportSaveProgress(SyncAgent.Results.Stats.SavedFileCount, true);

            stats.End();

            // ------------------------------------------------------------------------------------

            tx.Commit();
        }
        catch (OperationCanceledException)
        {
            tx.Rollback(); // Rollback the transaction.
            throw;
        }
        catch (Exception ex)
        {
            logger.Log(LogLevel.Error, ex, "Caught exception");
            tx.Rollback(); // Rollback the transaction.
            throw;
        }
        finally
        {
            if (session.IsConnected)
                session.Disconnect();
        }
    }
}
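// The BatchProcessor(250) above suggests the standard NHibernate bulk-write pattern:
// flush and clear the session every N entities so the first-level cache doesn't grow
// unbounded during a long save loop. A hypothetical sketch of what ProcessBatch may
// do internally (the real BatchProcessor/BatchTransaction types are not shown here):

using NHibernate;

// Hypothetical sketch of the batching behind BatchProcessor(250).
public class BatchProcessorSketch
{
    private readonly int _batchSize;
    private int _pending;

    public BatchProcessorSketch(int batchSize)
    {
        _batchSize = batchSize;
    }

    // Returns true if this call actually flushed the session.
    public bool ProcessBatch(ISession session, bool force = false)
    {
        _pending++;
        if (!force && _pending < _batchSize)
            return false;

        session.Flush(); // Push pending INSERT/UPDATE statements to the database.
        session.Clear(); // Evict all tracked entities to release memory.
        _pending = 0;
        return true;
    }
}

// One caveat of this pattern: session.Clear() detaches every loaded entity, so
// instances held across a batch boundary (such as `account` above) must be
// re-fetched or re-associated; the real implementation presumably handles this.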
public BackupPlanTreeNodeData(Models.StorageAccount account, EntryInfo infoObject)
{
    StorageAccount = account;
    InfoObject = infoObject;
}