// TODO(jweyrich): Should return a HashSet/ISet instead?
// Walks every source selected in the plan, dispatching by entry type.
// Per-entry failures are logged and skipped; cancellation is propagated.
public override void Scan()
{
    BlockPerfStats perfStats = new BlockPerfStats();
    perfStats.Begin();

    Results = new PathScanResults<CustomVersionedFile>();

    //
    // Add sources.
    //
    foreach (var source in Plan.SelectedSources)
    {
        try
        {
            switch (source.Type)
            {
                // Drives and folders are both scanned as directories.
                case EntryType.DRIVE:
                case EntryType.FOLDER:
                    AddDirectory(source);
                    break;
                case EntryType.FILE:
                    AddFile(source);
                    break;
                case EntryType.FILE_VERSION:
                    AddFileVersion(source);
                    break;
                default:
                    throw new InvalidOperationException("Unhandled EntryType");
            }
        }
        catch (OperationCanceledException)
        {
            throw; // Rethrow!
        }
        catch (Exception ex)
        {
            // Scanning is best-effort: log the failure and move on to the next source.
            string message = string.Format("Failed to scan entry \"{0}\" - {1}", source.Path, ex.Message);
            logger.Log(LogLevel.Error, ex, message);
        }
    }

    perfStats.End();
}
// Walks every source selected in the plan, dispatching by entry type.
// Drives are scanned from their root directory. Per-entry failures are
// delegated to HandleException; cancellation is propagated.
public override void Scan()
{
    BlockPerfStats perfStats = new BlockPerfStats();
    perfStats.Begin();

    Results = new PathScanResults<string>();

    //
    // Add sources.
    //
    foreach (var source in Plan.SelectedSources)
    {
        try
        {
            switch (source.Type)
            {
                case EntryType.DRIVE:
                {
                    // Scan the whole drive starting at its root directory.
                    string rootPath = new DriveInfo(source.Path).RootDirectory.FullName;
                    AddDirectory(new ZetaLongPaths.ZlpDirectoryInfo(rootPath));
                    break;
                }
                case EntryType.FOLDER:
                {
                    AddDirectory(new ZetaLongPaths.ZlpDirectoryInfo(source.Path));
                    break;
                }
                case EntryType.FILE:
                {
                    AddFile(new ZetaLongPaths.ZlpFileInfo(source.Path));
                    break;
                }
                default:
                    throw new InvalidOperationException("Unhandled EntryType");
            }
        }
        catch (OperationCanceledException)
        {
            throw; // Rethrow!
        }
        catch (Exception ex)
        {
            HandleException(source.Type, source.Path, ex);
        }
    }

    perfStats.End();
}
//
// Loads or creates `BackupPlanFile`s for each file in `filePaths`.
// Returns the complete list of `BackupPlanFile`s that are related to `filePaths`.
// If a `BackupPlanFile` does not exist for a given filePath, one will be created.
//
// NOTE: This method does NOT change the database.
//
private LinkedList<Models.BackupPlanFile> DoLoadOrCreateBackupPlanFiles(Models.BackupPlan plan, LinkedList<string> filePaths)
{
    Assert.IsNotNull(plan);
    Assert.IsNotNull(filePaths);
    Assert.IsNotNull(AllFilesFromPlan);

    BlockPerfStats perfStats = new BlockPerfStats();
    perfStats.Begin();

    // Keyed by normalized path so the same file never shows up twice in the result.
    var knownFiles = new Dictionary<string, Models.BackupPlanFile>();

    // Check all files.
    foreach (string rawPath in filePaths)
    {
        // Throw if the operation was canceled.
        CancellationToken.ThrowIfCancellationRequested();

        string normalizedPath = StringUtils.NormalizeUsingPreferredForm(rawPath);

        //
        // Reuse the plan's existing `BackupPlanFile` when there is one;
        // otherwise create a fresh, not-yet-persisted instance.
        // The complexity of Dictionary<TKey,TValue>.TryGetValue(TKey,TValue) approaches O(1)
        //
        Models.BackupPlanFile planFile;
        if (!AllFilesFromPlan.TryGetValue(normalizedPath, out planFile))
        {
            planFile = new Models.BackupPlanFile(plan, normalizedPath);
            planFile.CreatedAt = DateTime.UtcNow;
        }

        // This avoids duplicates in the list.
        // The complexity of setting Dictionary<TKey,TValue>[TKey] is amortized O(1)
        knownFiles[normalizedPath] = planFile;
    }

    LinkedList<Models.BackupPlanFile> result =
        knownFiles.ToLinkedList<Models.BackupPlanFile, KeyValuePair<string, Models.BackupPlanFile>>(p => p.Value);

    perfStats.End();

    return result;
}
// Persists the results of this backup operation to the database in ONE transaction:
//   STEP 1 - create/update `BackupPlanPathNode`s for every tracked file;
//   STEP 2 - insert/update the `BackupPlanFile`s themselves;
//   STEP 3 - insert/update one `BackupedFile` per tracked file;
//   STEP 4 - flush updates for plan files NOT touched by this backup;
//   STEP 5 - update the `Backup` aggregate itself.
// On failure the transaction is rolled back and the exception is rethrown.
// Side effects: sets `IsSaved = true` and fills `TransferSet.Files`.
private void Save(ISession session)
{
    Assert.IsFalse(IsSaved); // Guard: Save must run at most once per operation.

    BatchProcessor batchProcessor = new BatchProcessor();

    // All repositories share the same NHibernate session (and thus transaction).
    BackupRepository daoBackup = new BackupRepository(session);
    BackupPlanFileRepository daoBackupPlanFile = new BackupPlanFileRepository(session);
    BackupedFileRepository daoBackupedFile = new BackupedFileRepository(session);
    BackupPlanPathNodeRepository daoBackupPlanPathNode = new BackupPlanPathNodeRepository(session);

#if false
    var FilesToTrack = SuppliedFiles.Union(ChangeSet.DeletedFiles);
    var FilesToInsertOrUpdate =
        from f in FilesToTrack
        where
            // Keep it so we'll later add or update a `BackupedFile`.
            ((f.LastStatus == Models.BackupFileStatus.ADDED || f.LastStatus == Models.BackupFileStatus.MODIFIED))
            // Keep it if `LastStatus` is different from `PreviousLastStatus`.
            || ((f.LastStatus == Models.BackupFileStatus.REMOVED || f.LastStatus == Models.BackupFileStatus.DELETED) && (f.LastStatus != f.PreviousLastStatus))
        // Skip all UNCHANGED files.
        select f;
#else
    var FilesToTrack = SuppliedFiles;
    // NOTE: deferred LINQ query — it is re-enumerated by each STEP below.
    var FilesToInsertOrUpdate =
        from f in FilesToTrack
        where
            // Keep it so we'll later add or update a `BackupedFile`.
            ((f.LastStatus == Models.BackupFileStatus.ADDED || f.LastStatus == Models.BackupFileStatus.MODIFIED))
        // Skip all UNCHANGED/DELETED/REMOVED files.
        select f;
#endif

    BlockPerfStats stats = new BlockPerfStats();

    using (ITransaction tx = session.BeginTransaction())
    {
        try
        {
            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 1");

            BackupPlanPathNodeCreator pathNodeCreator = new BackupPlanPathNodeCreator(daoBackupPlanPathNode, tx);

            // 1 - Split path into its components and INSERT new path nodes if they don't exist yet.
            foreach (Models.BackupPlanFile entry in FilesToInsertOrUpdate)
            {
                // Throw if the operation was canceled.
                CancellationToken.ThrowIfCancellationRequested();

                try
                {
                    entry.PathNode = pathNodeCreator.CreateOrUpdatePathNodes(Backup.BackupPlan.StorageAccount, entry);
                }
                catch (Exception ex)
                {
                    string message = string.Format("BUG: Failed to create/update {0} => {1}",
                        typeof(Models.BackupPlanPathNode).Name,
                        CustomJsonSerializer.SerializeObject(entry, 1));
                    Results.OnError(this, message);
                    logger.Log(LogLevel.Error, ex, message);
                    throw; // Aborts the whole transaction (see outer catch).
                }

                batchProcessor.ProcessBatch(session);
            }
            batchProcessor.ProcessBatch(session, true); // Final flush for this step.
            stats.End();

            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 2");

            // 2 - Insert/Update `BackupPlanFile`s as necessary.
            foreach (Models.BackupPlanFile entry in FilesToInsertOrUpdate)
            {
                // Throw if the operation was canceled.
                CancellationToken.ThrowIfCancellationRequested();

                // IMPORTANT: It's important that we guarantee the referenced `BackupPlanFile` has a valid `Id`
                // before we reference it elsewhere, otherwise NHibernate won't have a valid value to put on
                // the `backup_plan_file_id` column.
                try
                {
                    daoBackupPlanFile.InsertOrUpdate(tx, entry); // Guarantee it's saved
                }
                catch (Exception ex)
                {
                    string message = string.Format("BUG: Failed to insert/update {0} => {1}",
                        typeof(Models.BackupPlanFile).Name,
                        CustomJsonSerializer.SerializeObject(entry, 1));
                    Results.OnError(this, message);
                    logger.Log(LogLevel.Error, ex, message);
                    logger.Error("Dump of failed object: {0}", entry.DumpMe());
                    throw; // Aborts the whole transaction (see outer catch).
                }

                batchProcessor.ProcessBatch(session);
            }
            batchProcessor.ProcessBatch(session, true); // Final flush for this step.
            stats.End();

            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 3");

            // 3 - Insert/Update `BackupedFile`s as necessary and add them to the `Backup`.
            //List<Models.BackupedFile> backupedFiles = new List<Models.BackupedFile>(FilesToInsertOrUpdate.Count());
            foreach (Models.BackupPlanFile entry in FilesToInsertOrUpdate)
            {
                // Throw if the operation was canceled.
                CancellationToken.ThrowIfCancellationRequested();

                Models.BackupedFile backupedFile = daoBackupedFile.GetByBackupAndPath(Backup, entry.Path);
                if (backupedFile == null) // If we're resuming, this should already exist.
                {
                    // Create `BackupedFile`.
                    backupedFile = new Models.BackupedFile(Backup, entry);
                }

                // Snapshot the plan file's latest filesystem state onto this backup's record.
                backupedFile.FileSize = entry.LastSize;
                backupedFile.FileStatus = entry.LastStatus;
                backupedFile.FileLastWrittenAt = entry.LastWrittenAt;
                backupedFile.FileLastChecksum = entry.LastChecksum;
                switch (entry.LastStatus)
                {
                    default:
                        // Not transferred yet — reset to the enum's default value.
                        backupedFile.TransferStatus = default(TransferStatus);
                        break;
                    case Models.BackupFileStatus.REMOVED:
                    case Models.BackupFileStatus.DELETED:
                        // Nothing to transfer for files that no longer exist.
                        backupedFile.TransferStatus = TransferStatus.COMPLETED;
                        break;
                }
                backupedFile.UpdatedAt = DateTime.UtcNow;

                try
                {
                    daoBackupedFile.InsertOrUpdate(tx, backupedFile);
                }
                catch (Exception ex)
                {
                    // NOTE(review): unlike the other steps, this one does not call
                    // `Results.OnError` before rethrowing — confirm whether that is intentional.
                    logger.Log(LogLevel.Error, ex, "BUG: Failed to insert/update {0} => {1}",
                        typeof(Models.BackupedFile).Name,
                        CustomJsonSerializer.SerializeObject(backupedFile, 1));
                    throw;
                }

                //backupedFiles.Add(backupedFile);

                batchProcessor.ProcessBatch(session);
            }
            batchProcessor.ProcessBatch(session, true); // Final flush for this step.
            stats.End();

            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 4");

            // 4 - Update all `BackupPlanFile`s that already exist for the backup plan associated with this backup operation.
            {
                var AllFilesFromPlanThatWerentUpdatedYet = AllFilesFromPlan.Values.Except(FilesToInsertOrUpdate);
                foreach (Models.BackupPlanFile file in AllFilesFromPlanThatWerentUpdatedYet)
                {
                    // Throw if the operation was canceled.
                    CancellationToken.ThrowIfCancellationRequested();

                    //Console.WriteLine("2: {0}", file.Path);
                    try
                    {
                        daoBackupPlanFile.Update(tx, file);
                    }
                    catch (Exception ex)
                    {
                        string message = string.Format("BUG: Failed to update {0} => {1} ",
                            typeof(Models.BackupPlanFile).Name,
                            CustomJsonSerializer.SerializeObject(file, 1));
                        Results.OnError(this, message);
                        logger.Log(LogLevel.Error, ex, message);
                        throw; // Aborts the whole transaction (see outer catch).
                    }

                    batchProcessor.ProcessBatch(session);
                }
            }
            batchProcessor.ProcessBatch(session, true); // Final flush for this step.
            stats.End();

            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 5");

            // 5 - Insert/Update `Backup` and its `BackupedFile`s into the database, also saving
            //     the `BackupPlanFile`s instances that may have been changed by step 2.
            {
                //foreach (var bf in backupedFiles)
                //{
                //	// Throw if the operation was canceled.
                //	CancellationToken.ThrowIfCancellationRequested();
                //
                //	Backup.Files.Add(bf);
                //
                //	ProcessBatch(session);
                //}

                try
                {
                    daoBackup.Update(tx, Backup);
                }
                catch (Exception ex)
                {
                    string message = string.Format("BUG: Failed to update {0} => {1}",
                        typeof(Models.Backup).Name,
                        CustomJsonSerializer.SerializeObject(Backup, 1));
                    Results.OnError(this, message);
                    logger.Log(LogLevel.Error, ex, message);
                    throw; // Aborts the whole transaction (see outer catch).
                }
            }
            batchProcessor.ProcessBatch(session, true); // Final flush for this step.
            stats.End();

            // ------------------------------------------------------------------------------------

            tx.Commit();
        }
        catch (OperationCanceledException)
        {
            string message = "Operation cancelled";
            Results.OnError(this, message);
            logger.Warn(message);
            tx.Rollback(); // Rollback the transaction
            throw;
        }
        catch (Exception ex)
        {
            string message = string.Format("Caught Exception: {0}", ex.Message);
            Results.OnError(this, message);
            logger.Log(LogLevel.Error, ex, message);
            tx.Rollback(); // Rollback the transaction
            throw;
        }
        finally
        {
            // ...
        }
    }

    IsSaved = true;

    // 6 - Create versioned files and remove files that won't belong to this backup.
    TransferSet.Files = GetFilesToTransfer(Backup, SuppliedFiles);

    // Test to see if things are okay!
    {
        var transferCount = TransferSet.Files.Count();
        var filesCount = ChangeSet.AddedFiles.Count() + ChangeSet.ModifiedFiles.Count();
        Assert.IsTrue(transferCount == filesCount, "TransferSet.Files must be equal (ChangeSet.AddedFiles + ChangeSet.ModifiedFiles)");
    }
}
//
// Summary:
// Update the `LastWrittenAt`,`LastSize`,`LastStatus`,`LastUpdatedAt`,`LastChecksum`
// properties of each file in `files` according to the actual state of the file in the filesystem.
//
// NOTE: This function has a side effect - It updates properties of items from `files`.
//
// Entries that throw while being examined are moved out of `files` and into
// `ChangeSet.FailedFiles`. `isNewVersion` distinguishes a new backup operation
// (modification checks run) from a resumed one (they are skipped).
private void DoUpdateBackupPlanFilesStatus(LinkedList<Models.BackupPlanFile> files, bool isNewVersion)
{
    Assert.IsNotNull(files);

    ISession session = NHibernateHelper.GetSession();
    BackupedFileRepository daoBackupedFile = new BackupedFileRepository(session);

    BlockPerfStats stats = new BlockPerfStats();
    stats.Begin();

    // Check all files.
    // Manual node iteration (instead of foreach) so a failed node can be removed
    // in O(1) while we keep walking the list.
    LinkedListNode<Models.BackupPlanFile> node = files.First;
    while (node != null)
    {
        var next = node.Next; // Capture before a potential `files.Remove(node)` below.
        Models.BackupPlanFile entry = node.Value;

        // TODO(jweyrich): Measure whether `daoBackupedFile.GetLatestVersion(entry)` is faster or not,
        // and whether "entry.Versions.anything" would cause all related version to be fetched.
#if false
        Models.BackupedFile lastVersion = entry.Versions != null && entry.Versions.Count > 0
            ? entry.Versions.Last() : null;
#else
        // This may be a version that has not COMPLETED the transfer.
        // `entry.Id == null` means the entry was never persisted, hence has no versions.
        Models.BackupedFile lastVersion = entry.Id.HasValue ? daoBackupedFile.GetLatestVersion(entry) : null;
#endif

        // Throw if the operation was canceled.
        CancellationToken.ThrowIfCancellationRequested();

        //
        // Check what happened to the file.
        //
        bool fileExistsOnFilesystem = FileManager.FileExists(entry.Path);
        Models.BackupFileStatus? changeStatusTo = null;

        try
        {
            //
            // Update file properties
            //
            if (fileExistsOnFilesystem)
            {
                try
                {
                    DateTime fileLastWrittenAt = FileManager.UnsafeGetFileLastWriteTimeUtc(entry.Path);
                    long fileLength = FileManager.UnsafeGetFileSize(entry.Path);
                    entry.LastWrittenAt = fileLastWrittenAt;
                    entry.LastSize = fileLength;
                }
                catch (Exception ex)
                {
                    string message = string.Format("Caught an exception while retrieving file properties: {0}", ex.Message);
                    Results.OnFileFailed(this, new FileVersionerEventArgs { FilePath = entry.Path, FileSize = 0 }, message);
                    logger.Warn(message);
                    throw; // Handled by the outer catch: entry goes to FailedFiles.
                }

                try
                {
                    // Skip files larger than `MAX_FILESIZE_TO_HASH`.
                    int result = BigInteger.Compare(entry.LastSize, MAX_FILESIZE_TO_HASH);
                    if (result < 0)
                    {
                        entry.LastChecksum = CalculateHashForFile(entry.Path);
                    }
                }
                catch (Exception ex)
                {
                    string message = string.Format("Caught an exception while calculating the file hash: {0}", ex.Message);
                    Results.OnFileFailed(this, new FileVersionerEventArgs { FilePath = entry.Path, FileSize = 0 }, message);
                    logger.Warn(message);
                    throw; // Handled by the outer catch: entry goes to FailedFiles.
                }

                Results.OnFileCompleted(this, new FileVersionerEventArgs { FilePath = entry.Path, FileSize = entry.LastSize });
            }

            //
            // Update file status
            //
            if (lastVersion != null) // File was backed up at least once in the past?
            {
                switch (entry.LastStatus)
                {
                    case Models.BackupFileStatus.DELETED: // File was marked as DELETED by a previous backup?
                        if (fileExistsOnFilesystem) // Exists?
                        {
                            changeStatusTo = Models.BackupFileStatus.ADDED;
                        }
                        break;

                    case Models.BackupFileStatus.REMOVED: // File was marked as REMOVED by a previous backup?
                        if (fileExistsOnFilesystem) // Exists?
                        {
                            changeStatusTo = Models.BackupFileStatus.ADDED;
                        }
                        else
                        {
                            // QUESTION: Do we really care to transition REMOVED to DELETED?
                            changeStatusTo = Models.BackupFileStatus.DELETED;
                        }
                        break;

                    default: // ADDED, MODIFIED, UNCHANGED
                        if (fileExistsOnFilesystem) // Exists?
                        {
                            // DO NOT verify whether the file changed for a `ResumeBackupOperation`,
                            // only for `NewBackupOperation`.
                            if (isNewVersion)
                            {
                                if (IsFileModified(entry, lastVersion)) // Modified?
                                {
                                    changeStatusTo = Models.BackupFileStatus.MODIFIED;
                                }
                                else if (NeedsToRetryFile(entry, lastVersion)) // Didn't complete last file transfer?
                                {
                                    // Re-mark as MODIFIED so the file is queued for transfer again.
                                    changeStatusTo = Models.BackupFileStatus.MODIFIED;
                                }
                                else // Not modified?
                                {
                                    changeStatusTo = Models.BackupFileStatus.UNCHANGED;
                                }
                            }
                        }
                        else // Deleted from filesystem?
                        {
                            changeStatusTo = Models.BackupFileStatus.DELETED;
                        }
                        break;
                }
            }
            else // Adding to this backup?
            {
                if (fileExistsOnFilesystem) // Exists?
                {
                    changeStatusTo = Models.BackupFileStatus.ADDED;
                }
                else
                {
                    // Error? Can't add a non-existent file to the plan.
                }
            }

            if (changeStatusTo.HasValue)
            {
                entry.LastStatus = changeStatusTo.Value;
                entry.UpdatedAt = DateTime.UtcNow;
            }
        }
        catch (Exception ex)
        {
            // Record the failure and drop the entry from the working set.
            FailedFile<Models.BackupPlanFile> failedEntry = new FailedFile<Models.BackupPlanFile>(entry, ex.Message, ex);
            ChangeSet.FailedFiles.AddLast(failedEntry);

            // Remove this entry from `files` as it clearly failed.
            files.Remove(node); // Complexity is O(1)
        }

        node = next;
    }

    stats.End();
}
// Summary:
// Saves all instances from RemoteObjects list to the database.
// Also removes them from RemoteObjects list to free memory.
//
// For each remote S3 object: parses its key into (type, path, version), then
// creates/updates the matching `BackupPlanFile`, `BackupedFile` and path nodes,
// flushing in batches inside a single `BatchTransaction`. Unparseable keys or
// version strings are skipped. Rolls back and rethrows on any other failure.
private void Save(CancellationToken CancellationToken)
{
    ISession session = NHibernateHelper.GetSession();
    BatchProcessor batchProcessor = new BatchProcessor(250); // Flush every 250 entities.

    // All repositories share the same NHibernate session (and thus transaction).
    StorageAccountRepository daoStorageAccount = new StorageAccountRepository(session);
    BackupPlanFileRepository daoBackupPlanFile = new BackupPlanFileRepository(session);
    BackupPlanPathNodeRepository daoBackupPlanPathNode = new BackupPlanPathNodeRepository(session);
    BackupedFileRepository daoBackupedFile = new BackupedFileRepository(session);

    BlockPerfStats stats = new BlockPerfStats();

    using (BatchTransaction tx = batchProcessor.BeginTransaction(session))
    {
        try
        {
            // ------------------------------------------------------------------------------------

            // Re-fetch the account through THIS session so it is attached to the transaction.
            Models.StorageAccount account = daoStorageAccount.Get(Synchronization.StorageAccount.Id);

            // ------------------------------------------------------------------------------------

            stats.Begin("STEP 1");

            BackupPlanPathNodeCreator pathNodeCreator = new BackupPlanPathNodeCreator(daoBackupPlanPathNode, tx);

            // Report save progress
            ReportSaveProgress(SyncAgent.Results.Stats.SavedFileCount, true);

            // Saving loop
            // Iterate backwards so RemoveAt(i) never shifts elements we have yet to visit.
            for (int i = RemoteObjects.Count - 1; i >= 0; i--)
            {
                ListingObject obj = RemoteObjects[i]; // Get instance of object.
                //RemoteObjects[i] = null;
                RemoteObjects.RemoveAt(i); // Remove to free memory. RemoveAt(int) is O(N).

                // Throw if the operation was canceled.
                CancellationToken.ThrowIfCancellationRequested();

                Models.EntryType type;
                string path = string.Empty;
                string versionString = string.Empty;
                try
                {
                    // Parse obj.Key into its relevant parts.
                    // NOTE(review): the boolean result of ParseS3Key is ignored — confirm
                    // whether a `false` return (without throwing) can occur and should be skipped too.
                    bool ok = ParseS3Key(obj.Key, out type, out path, out versionString);
                }
                catch (Exception ex)
                {
                    if (ex is ArgumentException || ex is IndexOutOfRangeException)
                    {
                        // Report error.
                        logger.Warn("Failed to parse S3 key: {0} -- Skipping.", obj.Key);
                        //logger.Log(LogLevel.Warn, ex, "Failed to parse S3 key: {0}", obj.Key);
                        //SyncAgent.Results.Stats.FailedSavedFileCount += 1;

                        // Report save progress
                        //ReportSaveProgress(SyncAgent.Results.Stats.SavedFileCount);

                        continue; // Skip this file.
                    }
                    throw;
                }

                path = StringUtils.NormalizeUsingPreferredForm(path);

                // The version component of the key encodes the file's last-write timestamp.
                DateTime lastWrittenAt = DateTime.ParseExact(versionString, Models.BackupedFile.VersionFormat, CultureInfo.InvariantCulture);

                // Create/Update BackupPlanFile, but do not SAVE it.
                Models.BackupPlanFile entry = daoBackupPlanFile.GetByStorageAccountAndPath(account, path);
                Models.BackupedFile version = null;
                if (entry == null)
                {
                    // Create `BackupPlanFile`.
                    entry = new Models.BackupPlanFile();
                    entry.BackupPlan = null; // Synced files are not tied to a local backup plan.
                    entry.StorageAccountType = account.Type;
                    entry.StorageAccount = account;
                    entry.Path = path;
                    entry.LastSize = obj.Size;
                    entry.LastWrittenAt = lastWrittenAt;
                    //entry.LastChecksum = ;
                    entry.LastStatus = Models.BackupFileStatus.UNCHANGED;
                    entry.CreatedAt = DateTime.UtcNow;

                    // Create `BackupedFile`.
                    version = new Models.BackupedFile(null, entry, Synchronization);
                    version.StorageAccountType = account.Type;
                    version.StorageAccount = account;
                    version.FileLastWrittenAt = lastWrittenAt;
                    version.FileLastChecksum = entry.LastChecksum;
                    version.FileSize = entry.LastSize;
                    version.FileStatus = Models.BackupFileStatus.MODIFIED;
                    version.TransferStatus = TransferStatus.COMPLETED; // It already exists remotely.
                    version.UpdatedAt = DateTime.UtcNow;

                    entry.Versions.Add(version);
                    //daoBackupedFile.Insert(tx, version);
                }
                else
                {
                    // Update `BackupPlanFile`.
                    entry.LastSize = obj.Size;
                    entry.LastWrittenAt = lastWrittenAt;
                    //entry.LastChecksum =
                    //entry.LastStatus = Models.BackupFileStatus.MODIFIED;
                    entry.UpdatedAt = DateTime.UtcNow;

                    IList<Models.BackupedFile> versions = null;
                    try
                    {
                        versions = daoBackupedFile.GetCompletedByStorageAccountAndPath(account, path, versionString);
                    }
                    catch (FormatException)
                    {
                        // Report error.
                        logger.Warn("Failed to parse versionString: {0} -- Skipping.", versionString);
                        //SyncAgent.Results.Stats.FailedSavedFileCount += 1;
                        continue; // TODO(jweyrich): Should we abort?
                    }

                    // Check whether our database already contains this exact file + version.
                    if (versions == null || (versions != null && versions.Count == 0))
                    {
                        // Create `BackupedFile`.
                        version = new Models.BackupedFile(null, entry, Synchronization);
                        version.StorageAccountType = account.Type;
                        version.StorageAccount = account;
                        version.FileLastWrittenAt = entry.LastWrittenAt;
                        version.FileLastChecksum = entry.LastChecksum;
                        version.FileSize = entry.LastSize;
                        version.FileStatus = Models.BackupFileStatus.MODIFIED;
                        version.TransferStatus = TransferStatus.COMPLETED; // It already exists remotely.
                        version.UpdatedAt = DateTime.UtcNow;

                        entry.Versions.Add(version);
                        //daoBackupedFile.Insert(tx, version);
                    }
                    else
                    {
                        // Update `BackupedFile`.
                        version = versions.First();
                        version.FileLastWrittenAt = entry.LastWrittenAt;
                        version.FileLastChecksum = entry.LastChecksum;
                        version.FileSize = entry.LastSize;
                        version.UpdatedAt = DateTime.UtcNow;
                        //daoBackupedFile.Update(tx, version);
                    }
                }

                try
                {
                    // Create path nodes and INSERT them, if they don't exist yet.
                    entry.PathNode = pathNodeCreator.CreateOrUpdatePathNodes(account, entry);

                    // Create or update `BackupPlanFile`.
                    daoBackupPlanFile.InsertOrUpdate(tx, entry);
                }
                catch (Exception ex)
                {
                    logger.Log(LogLevel.Error, ex, "BUG: Failed to insert/update {0} => {1}",
                        typeof(Models.BackupPlanFile).Name,
                        CustomJsonSerializer.SerializeObject(entry, 1));
                    logger.Error("Dump of failed object: {0}", entry.DumpMe());
                    throw; // Aborts the whole transaction (see outer catch).
                }

                // NOTE(review): `didCommit` is unused — confirm whether the result should drive anything.
                bool didCommit = batchProcessor.ProcessBatch(tx);

                SyncAgent.Results.Stats.SavedFileCount += 1;

                // Report save progress
                ReportSaveProgress(SyncAgent.Results.Stats.SavedFileCount);
            }

            batchProcessor.ProcessBatch(tx, true); // Final flush for anything still pending.

            // Report save progress
            ReportSaveProgress(SyncAgent.Results.Stats.SavedFileCount, true);

            stats.End();

            // ------------------------------------------------------------------------------------

            tx.Commit();
        }
        catch (OperationCanceledException)
        {
            tx.Rollback(); // Rollback the transaction
            throw;
        }
        catch (Exception ex)
        {
            logger.Log(LogLevel.Error, ex, "Caught exception");
            tx.Rollback(); // Rollback the transaction
            throw;
        }
        finally
        {
            //session.Close();
            if (session.IsConnected)
            {
                session.Disconnect();
            }
        }
    }
}
// Click handler for the "Start" button: validates the input fields, then performs
// a single-file upload to S3, reporting the outcome through message boxes.
private void btnStart_Click(object sender, EventArgs e)
{
    // Ignore re-entrant clicks while an upload is already in-flight.
    if (IsRunning)
    {
        return;
    }
    IsRunning = true;

    CancellationTokenSource = new CancellationTokenSource();

    var agentOptions = new TransferAgentOptions
    {
        UploadChunkSizeInBytes = 1 * 1024 * 1024,
    };

    string accessKey = txtAccessKey.Text.Trim();
    string secretKey = txtSecretKey.Text.Trim();
    string bucketName = txtBucketName.Text.Trim();
    BasicAWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);

    string localFilePath = txtFilePath.Text;
    bool fileInformed = !string.IsNullOrEmpty(localFilePath);
    bool fileExists = fileInformed && FileManager.FileExists(localFilePath);

    // Bail out early when no usable file was provided.
    if (!fileInformed || !fileExists)
    {
        string message = !fileInformed
            ? "You have to inform a file for upload"
            : string.Format("The informed file does not exist: {0}", localFilePath);
        MessageBox.Show(message, "Oops!", MessageBoxButtons.OK, MessageBoxIcon.Error);
        IsRunning = false;
        return;
    }

#if true
    string remoteFilePath = typeof(UploadPerfTestControl).Name + ".DELETE_ME";
#else
    S3PathBuilder pathBuilder = new S3PathBuilder();
    string remoteFilePath = pathBuilder.BuildRemotePath(localFilePath);
#endif

    long fileSize = FileManager.UnsafeGetFileSize(localFilePath);
    BlockPerfStats perfStats = new BlockPerfStats();

    S3TransferAgent transferAgent = new S3TransferAgent(agentOptions, credentials, bucketName, CancellationTokenSource.Token);
    transferAgent.UploadFileStarted += (object s, TransferFileProgressArgs args) =>
    {
        perfStats.Begin();
    };
    transferAgent.UploadFileCanceled += (object s, TransferFileProgressArgs args) =>
    {
        perfStats.End();
        string message = "Canceled file upload";
        MessageBox.Show(message, "Transfer canceled", MessageBoxButtons.OK, MessageBoxIcon.Information);
    };
    transferAgent.UploadFileFailed += (object s, TransferFileProgressArgs args) =>
    {
        perfStats.End();
        string message = string.Format("Failed to upload file: {0}\n{1}", args.Exception.GetType().Name, args.Exception.Message);
        MessageBox.Show(message, "Transfer failed", MessageBoxButtons.OK, MessageBoxIcon.Error);
    };
    transferAgent.UploadFileCompleted += (object s, TransferFileProgressArgs args) =>
    {
        perfStats.End();
        string message = string.Format(
            "Took {0} to upload {1}",
            TimeSpanUtils.GetReadableTimespan(perfStats.Duration),
            FileSizeUtils.FileSizeToString(fileSize)
            );
        MessageBox.Show(message, "Transfer completed", MessageBoxButtons.OK, MessageBoxIcon.Information);
    };
    //transferAgent.UploadFileProgress += (object s, TransferFileProgressArgs args) =>
    //{
    //	// ...
    //};

    transferAgent.UploadFile(localFilePath, remoteFilePath, null);

    IsRunning = false;
}
//
// Summary:
// 1. Create `RestorePlanFile`s and `RestoredFile`s as necessary and add them to the `Restore`.
// 2. Insert/Update `Restore` and its `RestorededFile`s into the database, also saving
//    the `RestorePlanFile`s instances that may have been changed by step 1.2.
// 3. Create versioned files and remove files that won't belong to this restore.
//
// On failure the transaction is rolled back and the exception rethrown.
// Side effects: sets `IsSaved = true` and fills `TransferSet.Files`.
public void Save()
{
    Assert.IsFalse(IsSaved); // Guard: Save must run at most once per operation.

    ISession session = NHibernateHelper.GetSession();

    // All repositories share the same NHibernate session (and thus transaction).
    RestoreRepository daoRestore = new RestoreRepository(session);
    RestorePlanFileRepository daoRestorePlanFile = new RestorePlanFileRepository(session);
    RestoredFileRepository daoRestoredFile = new RestoredFileRepository(session);
    // NOTE(review): `daoBackupPlanPathNode` is constructed but never used below —
    // confirm whether it can be removed.
    BackupPlanPathNodeRepository daoBackupPlanPathNode = new BackupPlanPathNodeRepository(session);

    var FilesToTrack = SuppliedFiles;
    var FilesToInsertOrUpdate = FilesToTrack;

    BlockPerfStats stats = new BlockPerfStats();

    using (ITransaction tx = session.BeginTransaction())
    {
        try
        {
            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 1");

            // 1. Create `RestorePlanFile`s and `RestoredFile`s as necessary and add them to the `Restore`.
            foreach (Models.RestorePlanFile entry in FilesToInsertOrUpdate)
            {
                // Throw if the operation was canceled.
                CancellationToken.ThrowIfCancellationRequested();

                // 1.1 - Insert/Update RestorePlanFile's and RestoredFile's if they don't exist yet.

                // IMPORTANT: It's important that we guarantee the referenced `RestorePlanFile` has a valid `Id`
                // before we reference it elsewhere, otherwise NHibernate won't have a valid value to put on
                // the `restore_plan_file_id` column.
                daoRestorePlanFile.InsertOrUpdate(tx, entry); // Guarantee it's saved

                Models.RestoredFile restoredFile = daoRestoredFile.GetByRestoreAndPath(Restore, entry.Path);
                if (restoredFile == null) // If we're resuming, this should already exist.
                {
                    // Create `RestoredFile`.
                    // NOTE(review): assumes `entry.VersionedFile.UserData` carries the originating
                    // `BackupedFile` (the `as` cast yields null otherwise) — confirm against the caller.
                    Models.BackupedFile backupedFile = entry.VersionedFile.UserData as Models.BackupedFile;
                    restoredFile = new Models.RestoredFile(Restore, entry, backupedFile);
                }
                restoredFile.UpdatedAt = DateTime.UtcNow;
                daoRestoredFile.InsertOrUpdate(tx, restoredFile);

                Restore.Files.Add(restoredFile);
                //daoRestore.Update(tx, Restore);

                ProcessBatch(session);
            }
            ProcessBatch(session, true); // Final flush for this step.
            stats.End();

            // ------------------------------------------------------------------------------------
            stats.Begin("STEP 2");

            // 2. Insert/Update `Restore` and its `RestorededFile`s into the database, also saving
            //    the `RestorePlanFile`s instances that may have been changed by step 1.2.
            {
                daoRestore.Update(tx, Restore);
            }
            ProcessBatch(session, true); // Final flush for this step.
            stats.End();

            // ------------------------------------------------------------------------------------

            tx.Commit();
        }
        catch (OperationCanceledException)
        {
            tx.Rollback(); // Rollback the transaction
            throw;
        }
        catch (Exception)
        {
            tx.Rollback(); // Rollback the transaction
            throw;
        }
        finally
        {
            //session.Close();
            if (session.IsConnected)
            {
                session.Disconnect();
            }
        }
    }

    IsSaved = true;

    // 3. Create versioned files and remove files that won't belong to this restore.
    TransferSet.Files = GetFilesToTransfer(Restore, SuppliedFiles);
}