Example #1
        public void Run(long samples)
        {
            if (!System.IO.File.Exists(m_options.Dbpath))
            {
                throw new UserInformationException(string.Format("Database file does not exist: {0}", m_options.Dbpath), "DatabaseDoesNotExist");
            }

            using (var db = new LocalTestDatabase(m_options.Dbpath))
                using (var backend = new BackendManager(m_backendurl, m_options, m_results.BackendWriter, db))
                {
                    db.SetResult(m_results);
                    Utility.UpdateOptionsFromDb(db, m_options);
                    Utility.VerifyParameters(db, m_options);
                    db.VerifyConsistency(m_options.Blocksize, m_options.BlockhashSize, true, null);

                    if (!m_options.NoBackendverification)
                    {
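                        // Volumes from temporary (in-progress) filelists are passed as
                        // protected files so that an interrupted or concurrent upload does
                        // not cause the remote verification to fail.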
                        var backupDatabase = new LocalBackupDatabase(db, m_options);
                        var latestFilelist = backupDatabase.GetTemporaryFilelistVolumeNames(latestOnly: true);
                        FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_results.BackendWriter, latestFilelist);
                    }

                    DoRun(samples, db, backend);
                    db.WriteResults();
                }
        }
Example #2
        private void PreBackupVerify(BackendManager backend, string protectedfile)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreBackupVerify);
            using (new Logging.Timer(LOGTAG, "PreBackupVerify", "PreBackupVerify"))
            {
                try
                {
                    if (m_options.NoBackendverification)
                    {
                        FilelistProcessor.VerifyLocalList(backend, m_options, m_database, m_result.BackendWriter);
                        UpdateStorageStatsFromDatabase();
                    }
                    else
                    {
                        FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter, protectedfile);
                    }
                }
                catch (Exception ex)
                {
                    if (m_options.AutoCleanup)
                    {
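                        // Attempt an automatic repair of the remote file list, then verify
                        // again; if the retry also fails, the exception propagates to the caller.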
                        Logging.Log.WriteWarningMessage(LOGTAG, "BackendVerifyFailedAttemptingCleanup", ex, "Backend verification failed, attempting automatic cleanup");
                        m_result.RepairResults = new RepairResults(m_result);
                        new RepairHandler(backend.BackendUrl, m_options, (RepairResults)m_result.RepairResults).Run();

                        Logging.Log.WriteInformationMessage(LOGTAG, "BackendCleanupFinished", "Backend cleanup finished, retrying verification");
                        FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter);
                    }
                    else
                    {
                        throw;
                    }
                }
            }
        }
Example #3
 public static void VerifyRemoteList(BackendManager backend, Options options, LocalDatabase database, IBackendWriter backendWriter, bool latestVolumesOnly, IDbTransaction transaction)
 {
     if (!options.NoBackendverification)
     {
         LocalBackupDatabase  backupDatabase = new LocalBackupDatabase(database, options);
         IEnumerable <string> protectedFiles = backupDatabase.GetTemporaryFilelistVolumeNames(latestVolumesOnly, transaction);
         FilelistProcessor.VerifyRemoteList(backend, options, database, backendWriter, protectedFiles);
     }
 }
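
As a usage note, this overload performs the NoBackendverification check itself, so callers can invoke it unconditionally. A minimal sketch of a call site, assuming a handler that already holds backend, m_options, db, m_result, and an open transaction as in the surrounding examples; the wrapping method name is hypothetical:

 // Hedged sketch: the surrounding names mirror the handlers above; this wrapper itself is hypothetical.
 private void VerifyBeforeMutation(BackendManager backend, LocalDatabase db, System.Data.IDbTransaction transaction)
 {
     // Protect the newest temporary filelist volumes, as Examples #1 and #7 do,
     // so that an in-progress upload does not fail verification.
     FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter,
                                        latestVolumesOnly: true, transaction: transaction);
 }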
Example #4
        private void PostBackupVerification()
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupVerify);
            using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
            {
                using (new Logging.Timer(LOGTAG, "AfterBackupVerify", "AfterBackupVerify"))
                    FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter);
                backend.WaitForComplete(m_database, null);
            }

            if (m_options.BackupTestSampleCount > 0 && m_database.GetRemoteVolumes().Count() > 0)
            {
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupTest);
                m_result.TestResults = new TestResults(m_result);

                using (var testdb = new LocalTestDatabase(m_database))
                    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, testdb))
                        new TestHandler(m_backendurl, m_options, (TestResults)m_result.TestResults)
                        .DoRun(m_options.BackupTestSampleCount, testdb, backend);
            }
        }
Example #5
        public void Run(long samples)
        {
            if (!System.IO.File.Exists(m_options.Dbpath))
            {
                throw new Exception(string.Format("Database file does not exist: {0}", m_options.Dbpath));
            }

            using (var db = new LocalTestDatabase(m_options.Dbpath))
                using (var backend = new BackendManager(m_backendurl, m_options, m_results.BackendWriter, db))
                {
                    db.SetResult(m_results);
                    Utility.VerifyParameters(db, m_options);

                    if (!m_options.NoBackendverification)
                    {
                        FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_results.BackendWriter);
                    }

                    DoRun(samples, db, backend);
                    db.WriteResults();
                }
        }
Example #6
        private void PostBackupVerification()
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupVerify);
            using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
            {
                using (new Logging.Timer(LOGTAG, "AfterBackupVerify", "AfterBackupVerify"))
                    FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter);
                backend.WaitForComplete(m_database, null);
            }

            long remoteVolumeCount = m_database.GetRemoteVolumes().LongCount(x => x.State == RemoteVolumeState.Verified);
            long samplesToTest     = Math.Max(m_options.BackupTestSampleCount, (long)Math.Round(remoteVolumeCount * (m_options.BackupTestPercentage / 100D), MidpointRounding.AwayFromZero));
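            // Worked example (hypothetical numbers): with 200 verified remote volumes,
            // --backup-test-samples=1 and --backup-test-percentage=10, the percentage term
            // is Round(200 * 0.10) = 20, so samplesToTest = Max(1, 20) = 20.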

            if (samplesToTest > 0 && remoteVolumeCount > 0)
            {
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupTest);
                m_result.TestResults = new TestResults(m_result);

                using (var testdb = new LocalTestDatabase(m_database))
                    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, testdb))
                        new TestHandler(m_backendurl, m_options, (TestResults)m_result.TestResults)
                        .DoRun(samplesToTest, testdb, backend);
            }
        }
Example #7
        public void Run(long samples) is not affected; fix parameter typo: public void DoRun(Database.LocalDeleteDatabase db, ref System.Data.IDbTransaction transaction, bool hasVerifiedBackend, bool forceCompact, BackendManager sharedManager)
        {
            // Workaround: allow an already-running BackendManager to be reused
            using (var bk = sharedManager == null ? new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db) : null)
            {
                var backend = bk ?? sharedManager;
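                // bk is created (and disposed) only when no shared manager was supplied;
                // otherwise the caller retains ownership of sharedManager.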

                if (!hasVerifiedBackend && !m_options.NoBackendverification)
                {
                    var backupDatabase = new LocalBackupDatabase(db, m_options);
                    var latestFilelist = backupDatabase.GetTemporaryFilelistVolumeNames(latestOnly: true, transaction: transaction);
                    FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter, latestFilelist);
                }

                IListResultFileset[]      filesets         = db.FilesetsWithBackupVersion.ToArray();
                List <IListResultFileset> versionsToDelete = new List <IListResultFileset>();
                versionsToDelete.AddRange(new SpecificVersionsRemover(this.m_options).GetFilesetsToDelete(filesets));
                versionsToDelete.AddRange(new KeepTimeRemover(this.m_options).GetFilesetsToDelete(filesets));
                versionsToDelete.AddRange(new RetentionPolicyRemover(this.m_options).GetFilesetsToDelete(filesets));

                // When determining the number of full versions to keep, we need to ignore the versions already marked for removal.
                versionsToDelete.AddRange(new KeepVersionsRemover(this.m_options).GetFilesetsToDelete(filesets.Except(versionsToDelete)));

                if (!m_options.AllowFullRemoval && filesets.Length == versionsToDelete.Count)
                {
                    Logging.Log.WriteInformationMessage(LOGTAG, "PreventingLastFilesetRemoval", "Preventing removal of last fileset, use --{0} to allow removal ...", "allow-full-removal");
                    versionsToDelete = versionsToDelete.OrderBy(x => x.Version).Skip(1).ToList();
                }

                if (versionsToDelete.Count > 0)
                {
                    Logging.Log.WriteInformationMessage(LOGTAG, "DeleteRemoteFileset", "Deleting {0} remote fileset(s) ...", versionsToDelete.Count);
                }

                var lst = db.DropFilesetsFromTable(versionsToDelete.Select(x => x.Time).ToArray(), transaction).ToArray();
                foreach (var f in lst)
                {
                    db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, transaction);
                }

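                // Persist the Deleting state before contacting the backend, then open a
                // fresh transaction for the remainder of the run (no commit on dry-run).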
                if (!m_options.Dryrun)
                {
                    transaction.Commit();
                    transaction = db.BeginTransaction();
                }

                foreach (var f in lst)
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(db, transaction);
                        return;
                    }

                    if (!m_options.Dryrun)
                    {
                        backend.Delete(f.Key, f.Value);
                    }
                    else
                    {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteRemoteFileset", "Would delete remote fileset: {0}", f.Key);
                    }
                }

                if (sharedManager == null)
                {
                    backend.WaitForComplete(db, transaction);
                }
                else
                {
                    backend.WaitForEmpty(db, transaction);
                }

                var count = lst.Length;
                if (!m_options.Dryrun)
                {
                    if (count == 0)
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "DeleteResults", "No remote filesets were deleted");
                    }
                    else
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "DeleteResults", "Deleted {0} remote fileset(s)", count);
                    }
                }
                else
                {
                    if (count == 0)
                    {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteResults", "No remote filesets would be deleted");
                    }
                    else
                    {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteResults", "{0} remote fileset(s) would be deleted", count);
                    }

                    if (count > 0)
                    {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteHelp", "Remove --dry-run to actually delete files");
                    }
                }

                if (!m_options.NoAutoCompact && (forceCompact || versionsToDelete.Count > 0))
                {
                    m_result.CompactResults = new CompactResults(m_result);
                    new CompactHandler(m_backendurl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, ref transaction, sharedManager);
                }

                m_result.SetResults(versionsToDelete.Select(v => new Tuple <long, DateTime>(v.Version, v.Time)), m_options.Dryrun);
            }
        }
Example #8
        public void DoRun(Database.LocalDeleteDatabase db, ref System.Data.IDbTransaction transaction, bool hasVerifiedBackend, bool forceCompact)
        {
            using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
            {
                if (!hasVerifiedBackend && !m_options.NoBackendverification)
                {
                    FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter);
                }

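                // Pair each fileset time with a version number; by convention, version 0
                // is the most recent fileset.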
                var filesetNumbers = db.FilesetTimes.Zip(Enumerable.Range(0, db.FilesetTimes.Count()), (a, b) => new Tuple <long, DateTime>(b, a.Value));
                var toDelete       = m_options.GetFilesetsToDelete(db.FilesetTimes.Select(x => x.Value).ToArray());

                if (toDelete != null && toDelete.Length > 0)
                {
                    m_result.AddMessage(string.Format("Deleting {0} remote fileset(s) ...", toDelete.Length));
                }

                var lst = db.DropFilesetsFromTable(toDelete, transaction).ToArray();
                foreach (var f in lst)
                {
                    db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, transaction);
                }

                if (!m_options.Dryrun)
                {
                    transaction.Commit();
                    transaction = db.BeginTransaction();
                }

                foreach (var f in lst)
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(db, transaction);
                        return;
                    }

                    if (!m_options.Dryrun)
                    {
                        backend.Delete(f.Key, f.Value);
                    }
                    else
                    {
                        m_result.AddDryrunMessage(string.Format("Would delete remote fileset: {0}", f.Key));
                    }
                }

                backend.WaitForComplete(db, transaction);

                var count = lst.Length;
                if (!m_options.Dryrun)
                {
                    if (count == 0)
                    {
                        m_result.AddMessage("No remote filesets were deleted");
                    }
                    else
                    {
                        m_result.AddMessage(string.Format("Deleted {0} remote fileset(s)", count));
                    }
                }
                else
                {
                    if (count == 0)
                    {
                        m_result.AddDryrunMessage("No remote filesets would be deleted");
                    }
                    else
                    {
                        m_result.AddDryrunMessage(string.Format("{0} remote fileset(s) would be deleted", count));
                    }

                    if (count > 0)
                    {
                        m_result.AddDryrunMessage("Remove --dry-run to actually delete files");
                    }
                }

                if (!m_options.NoAutoCompact && (forceCompact || (toDelete != null && toDelete.Length > 0)))
                {
                    m_result.CompactResults = new CompactResults(m_result);
                    new CompactHandler(m_backendurl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, ref transaction);
                }

                m_result.SetResults(
                    from n in filesetNumbers
                    where toDelete.Contains(n.Item2)
                    select n,
                    m_options.Dryrun);
            }
        }
Example #9
        internal bool DoCompact(LocalDeleteDatabase db, bool hasVerifiedBackend, ref System.Data.IDbTransaction transaction, BackendManager sharedBackend)
        {
            var report = db.GetCompactReport(m_options.VolumeSize, m_options.Threshold, m_options.SmallFileSize, m_options.SmallFileMaxCount, transaction);

            report.ReportCompactData();

            if (report.ShouldReclaim || report.ShouldCompact)
            {
                // Workaround: allow an already-running BackendManager to be reused
                using (var bk = sharedBackend == null ? new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db) : null)
                {
                    var backend = bk ?? sharedBackend;
                    if (!hasVerifiedBackend && !m_options.NoBackendverification)
                    {
                        FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter);
                    }

                    BlockVolumeWriter newvol = new BlockVolumeWriter(m_options);
                    newvol.VolumeID = db.RegisterRemoteVolume(newvol.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, transaction);

                    IndexVolumeWriter newvolindex = null;
                    if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
                    {
                        newvolindex          = new IndexVolumeWriter(m_options);
                        newvolindex.VolumeID = db.RegisterRemoteVolume(newvolindex.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, transaction);
                        db.AddIndexBlockLink(newvolindex.VolumeID, newvol.VolumeID, transaction);
                    }

                    long   blocksInVolume = 0;
                    byte[] buffer         = new byte[m_options.Blocksize];
                    var    remoteList     = db.GetRemoteVolumes().Where(n => n.State == RemoteVolumeState.Uploaded || n.State == RemoteVolumeState.Verified).ToArray();

                    // These are for bookkeeping
                    var uploadedVolumes   = new List <KeyValuePair <string, long> >();
                    var deletedVolumes    = new List <KeyValuePair <string, long> >();
                    var downloadedVolumes = new List <KeyValuePair <string, long> >();

                    // We start by deleting unused volumes to save space before uploading new data
                    var fullyDeleteable = (from v in remoteList
                                           where report.DeleteableVolumes.Contains(v.Name)
                                           select (IRemoteVolume)v).ToList();
                    deletedVolumes.AddRange(DoDelete(db, backend, fullyDeleteable, ref transaction));

                    // This list is used to pick up unused volumes,
                    // so they can be deleted once the upload of the
                    // required fragments is complete
                    var deleteableVolumes = new List <IRemoteVolume>();

                    if (report.ShouldCompact)
                    {
                        newvolindex?.StartVolume(newvol.RemoteFilename);
                        var volumesToDownload = (from v in remoteList
                                                 where report.CompactableVolumes.Contains(v.Name)
                                                 select (IRemoteVolume)v).ToList();

                        using (var q = db.CreateBlockQueryHelper(transaction))
                        {
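                            // Stream each compactable volume down, copy its still-referenced
                            // blocks into the new volume, and queue the old volume for deletion.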
                            foreach (var entry in new AsyncDownloader(volumesToDownload, backend))
                            {
                                using (var tmpfile = entry.TempFile)
                                {
                                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                    {
                                        backend.WaitForComplete(db, transaction);
                                        return false;
                                    }

                                    downloadedVolumes.Add(new KeyValuePair <string, long>(entry.Name, entry.Size));
                                    var inst = VolumeBase.ParseFilename(entry.Name);
                                    using (var f = new BlockVolumeReader(inst.CompressionModule, tmpfile, m_options))
                                    {
                                        foreach (var e in f.Blocks)
                                        {
                                            if (q.UseBlock(e.Key, e.Value, transaction))
                                            {
                                                //TODO: How do we get the compression hint? Reverse query for filename in db?
                                                var s = f.ReadBlock(e.Key, buffer);
                                                if (s != e.Value)
                                                {
                                                    throw new Exception(string.Format("Size mismatch problem for block {0}, {1} vs {2}", e.Key, s, e.Value));
                                                }

                                                newvol.AddBlock(e.Key, buffer, 0, s, Duplicati.Library.Interface.CompressionHint.Compressible);
                                                if (newvolindex != null)
                                                {
                                                    newvolindex.AddBlock(e.Key, e.Value);
                                                }

                                                db.MoveBlockToNewVolume(e.Key, e.Value, newvol.VolumeID, transaction);
                                                blocksInVolume++;

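                                                // The target volume is full: queue it for upload and start a fresh one.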
                                                if (newvol.Filesize > m_options.VolumeSize)
                                                {
                                                    uploadedVolumes.Add(new KeyValuePair <string, long>(newvol.RemoteFilename, newvol.Filesize));
                                                    if (newvolindex != null)
                                                    {
                                                        uploadedVolumes.Add(new KeyValuePair <string, long>(newvolindex.RemoteFilename, newvolindex.Filesize));
                                                    }

                                                    if (!m_options.Dryrun)
                                                    {
                                                        backend.Put(newvol, newvolindex);
                                                    }
                                                    else
                                                    {
                                                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadGeneratedBlockset", "Would upload generated blockset of size {0}", Library.Utility.Utility.FormatSizeString(newvol.Filesize));
                                                }

                                                    newvol          = new BlockVolumeWriter(m_options);
                                                    newvol.VolumeID = db.RegisterRemoteVolume(newvol.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, transaction);

                                                    if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
                                                    {
                                                        newvolindex          = new IndexVolumeWriter(m_options);
                                                        newvolindex.VolumeID = db.RegisterRemoteVolume(newvolindex.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, transaction);
                                                        db.AddIndexBlockLink(newvolindex.VolumeID, newvol.VolumeID, transaction);
                                                        newvolindex.StartVolume(newvol.RemoteFilename);
                                                    }

                                                    blocksInVolume = 0;

                                                    // After this volume is uploaded, all previously encountered volumes can be deleted
                                                    deletedVolumes.AddRange(DoDelete(db, backend, deleteableVolumes, ref transaction));
                                                    deleteableVolumes = new List <IRemoteVolume>();
                                                }
                                            }
                                        }
                                    }

                                    deleteableVolumes.Add(entry);
                                }
                            }

                            if (blocksInVolume > 0)
                            {
                                uploadedVolumes.Add(new KeyValuePair <string, long>(newvol.RemoteFilename, newvol.Filesize));
                                if (newvolindex != null)
                                {
                                    uploadedVolumes.Add(new KeyValuePair <string, long>(newvolindex.RemoteFilename, newvolindex.Filesize));
                                }
                                if (!m_options.Dryrun)
                                {
                                    backend.Put(newvol, newvolindex);
                                }
                                else
                                {
                                    Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadGeneratedBlockset", "Would upload generated blockset of size {0}", Library.Utility.Utility.FormatSizeString(newvol.Filesize));
                                }
                            }
                            else
                            {
                                db.RemoveRemoteVolume(newvol.RemoteFilename, transaction);
                                if (newvolindex != null)
                                {
                                    db.RemoveRemoteVolume(newvolindex.RemoteFilename, transaction);
                                    newvolindex.FinishVolume(null, 0);
                                }
                            }
                        }
                    }
                    else
                    {
                        newvolindex?.Dispose();
                        newvol.Dispose();
                    }

                    deletedVolumes.AddRange(DoDelete(db, backend, deleteableVolumes, ref transaction));

                    var downloadSize = downloadedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value);
                    var deletedSize  = deletedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value);
                    var uploadSize   = uploadedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value);

                    m_result.DeletedFileCount    = deletedVolumes.Count;
                    m_result.DownloadedFileCount = downloadedVolumes.Count;
                    m_result.UploadedFileCount   = uploadedVolumes.Count;
                    m_result.DeletedFileSize     = deletedSize;
                    m_result.DownloadedFileSize  = downloadSize;
                    m_result.UploadedFileSize    = uploadSize;
                    m_result.Dryrun = m_options.Dryrun;

                    if (m_result.Dryrun)
                    {
                        if (downloadedVolumes.Count == 0)
                        {
                            Logging.Log.WriteDryrunMessage(LOGTAG, "CompactResults", "Would delete {0} files, which would reduce storage by {1}", m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize));
                        }
                        else
                        {
                            Logging.Log.WriteDryrunMessage(LOGTAG, "CompactResults", "Would download {0} file(s) with a total size of {1}, delete {2} file(s) with a total size of {3}, and compact to {4} file(s) with a size of {5}, which would reduce storage by {6} file(s) and {7}",
                                                           m_result.DownloadedFileCount,
                                                           Library.Utility.Utility.FormatSizeString(m_result.DownloadedFileSize),
                                                           m_result.DeletedFileCount,
                                                           Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize), m_result.UploadedFileCount,
                                                           Library.Utility.Utility.FormatSizeString(m_result.UploadedFileSize),
                                                           m_result.DeletedFileCount - m_result.UploadedFileCount,
                                                           Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize - m_result.UploadedFileSize));
                        }
                    }
                    else
                    {
                        if (m_result.DownloadedFileCount == 0)
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "CompactResults", "Deleted {0} files, which reduced storage by {1}", m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize));
                        }
                        else
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "CompactResults", "Downloaded {0} file(s) with a total size of {1}, deleted {2} file(s) with a total size of {3}, and compacted to {4} file(s) with a size of {5}, which reduced storage by {6} file(s) and {7}",
                                                                m_result.DownloadedFileCount,
                                                                Library.Utility.Utility.FormatSizeString(downloadSize),
                                                                m_result.DeletedFileCount,
                                                                Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize),
                                                                m_result.UploadedFileCount,
                                                                Library.Utility.Utility.FormatSizeString(m_result.UploadedFileSize),
                                                                m_result.DeletedFileCount - m_result.UploadedFileCount,
                                                                Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize - m_result.UploadedFileSize));
                        }
                    }

                    backend.WaitForComplete(db, transaction);
                }

                m_result.EndTime = DateTime.UtcNow;
                return (m_result.DeletedFileCount + m_result.UploadedFileCount) > 0;
            }
            else
            {
                m_result.EndTime = DateTime.UtcNow;
                return false;
            }
        }
Example #10
        private void DoRun(Database.LocalPurgeDatabase db, Library.Utility.IFilter filter, Action <System.Data.IDbCommand, long, string> filtercommand, float pgoffset, float pgspan)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Begin);
            Logging.Log.WriteInformationMessage(LOGTAG, "StartingPurge", "Starting purge operation");

            var doCompactStep = !m_options.NoAutoCompact && filtercommand == null;

            using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
            {
                if (db.PartiallyRecreated)
                {
                    throw new UserInformationException("The purge command does not work on partially recreated databases", "PurgeNotAllowedOnPartialDatabase");
                }

                if (db.RepairInProgress && filtercommand == null)
                {
                    throw new UserInformationException(string.Format("The purge command does not work on an incomplete database, try the {0} operation.", "purge-broken-files"), "PurgeNotAllowedOnIncompleteDatabase");
                }

                var versions = db.GetFilesetIDs(m_options.Time, m_options.Version).OrderByDescending(x => x).ToArray();
                if (versions.Length <= 0)
                {
                    throw new UserInformationException("No filesets matched the supplied time or versions", "NoFilesetFoundForTimeOrVersion");
                }

                var orphans = db.CountOrphanFiles(null);
                if (orphans != 0)
                {
                    throw new UserInformationException(string.Format("Unable to start the purge process as there are {0} orphan file(s)", orphans), "CannotPurgeWithOrphans");
                }

                Utility.UpdateOptionsFromDb(db, m_options);
                Utility.VerifyParameters(db, m_options);

                if (filtercommand == null)
                {
                    db.VerifyConsistency(m_options.Blocksize, m_options.BlockhashSize, false, null);

                    if (m_options.NoBackendverification)
                    {
                        FilelistProcessor.VerifyLocalList(backend, db);
                    }
                    else
                    {
                        FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter, null);
                    }
                }

                var filesets = db.FilesetTimes.OrderByDescending(x => x.Value).ToArray();

                var versionprogress = ((doCompactStep ? 0.75f : 1.0f) / versions.Length) * pgspan;
                var currentprogress = pgoffset;
                var progress        = 0;

                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Process);
                m_result.OperationProgressUpdater.UpdateProgress(currentprogress);

                // Reverse ensures we rewrite the oldest versions first
                foreach (var versionid in versions.Reverse())
                {
                    progress++;
                    Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingFilelistVolumes", "Processing filelist volume {0} of {1}", progress, versions.Length);

                    using (var tr = db.BeginTransaction())
                    {
                        var ix = -1;
                        for (var i = 0; i < filesets.Length; i++)
                        {
                            if (filesets[i].Key == versionid)
                            {
                                ix = i;
                                break;
                            }
                        }

                        if (ix < 0)
                        {
                            throw new InvalidProgramException(string.Format("Fileset was reported with id {0}, but could not be found?", versionid));
                        }

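                        // Probe forward in one-second steps (up to 60) for a timestamp whose
                        // generated filelist name is not already registered remotely.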
                        var secs = 0;
                        while (secs < 60)
                        {
                            secs++;
                            var tfn = Volumes.VolumeBase.GenerateFilename(RemoteVolumeType.Files, m_options, null, filesets[ix].Value.AddSeconds(secs));
                            if (db.GetRemoteVolumeID(tfn, tr) < 0)
                            {
                                break;
                            }
                        }

                        var tsOriginal = filesets[ix].Value;
                        var ts         = tsOriginal.AddSeconds(secs);

                        var prevfilename = db.GetRemoteVolumeNameForFileset(filesets[ix].Key, tr);

                        if (secs >= 60)
                        {
                            throw new Exception(string.Format("Unable to create a new fileset for {0} because the resulting timestamp {1} is more than 60 seconds away", prevfilename, ts));
                        }

                        if (ix != 0 && filesets[ix - 1].Value <= ts)
                        {
                            throw new Exception(string.Format("Unable to create a new fileset for {0} because the resulting timestamp {1} is larger than the next timestamp {2}", prevfilename, ts, filesets[ix - 1].Value));
                        }

                        using (var tempset = db.CreateTemporaryFileset(versionid, tr))
                        {
                            if (filtercommand == null)
                            {
                                tempset.ApplyFilter(filter);
                            }
                            else
                            {
                                tempset.ApplyFilter(filtercommand);
                            }

                            if (tempset.RemovedFileCount == 0)
                            {
                                Logging.Log.WriteInformationMessage(LOGTAG, "NotWritingNewFileset", "Not writing a new fileset for {0} as it was not changed", prevfilename);
                                currentprogress += versionprogress;
                                tr.Rollback();
                                continue;
                            }
                            else
                            {
                                using (var tf = new Library.Utility.TempFile())
                                    using (var vol = new Volumes.FilesetVolumeWriter(m_options, ts))
                                    {
                                        var isOriginalFilesetFullBackup = db.IsFilesetFullBackup(tsOriginal);
                                        var newids = tempset.ConvertToPermanentFileset(vol.RemoteFilename, ts, isOriginalFilesetFullBackup);
                                        vol.VolumeID = newids.Item1;
                                        vol.CreateFilesetFile(isOriginalFilesetFullBackup);

                                        Logging.Log.WriteInformationMessage(LOGTAG, "ReplacingFileset", "Replacing fileset {0} with {1} which has with {2} fewer file(s) ({3} reduction)", prevfilename, vol.RemoteFilename, tempset.RemovedFileCount, Library.Utility.Utility.FormatSizeString(tempset.RemovedFileSize));

                                        db.WriteFileset(vol, newids.Item2, tr);

                                        m_result.RemovedFileSize  += tempset.RemovedFileSize;
                                        m_result.RemovedFileCount += tempset.RemovedFileCount;
                                        m_result.RewrittenFileLists++;

                                        currentprogress += (versionprogress / 2);
                                        m_result.OperationProgressUpdater.UpdateProgress(currentprogress);

                                        if (m_options.Dryrun || m_options.FullResult)
                                        {
                                            foreach (var fe in tempset.ListAllDeletedFiles())
                                            {
                                                var msg = string.Format("  Purging file {0} ({1})", fe.Key, Library.Utility.Utility.FormatSizeString(fe.Value));

                                                Logging.Log.WriteProfilingMessage(LOGTAG, "PurgeFile", msg);
                                                Logging.Log.WriteVerboseMessage(LOGTAG, "PurgeFile", msg);

                                                if (m_options.Dryrun)
                                                {
                                                    Logging.Log.WriteDryrunMessage(LOGTAG, "WouldPurgeFile", msg);
                                                }
                                            }

                                            if (m_options.Dryrun)
                                            {
                                                Logging.Log.WriteDryrunMessage(LOGTAG, "WouldWriteRemoteFiles", "Would write files to remote storage");
                                            }

                                            Logging.Log.WriteVerboseMessage(LOGTAG, "WritingRemoteFiles", "Writing files to remote storage");
                                        }

                                        if (m_options.Dryrun)
                                        {
                                            Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadAndDelete", "Would upload file {0} ({1}) and delete file {2}, removing {3} files", vol.RemoteFilename, Library.Utility.Utility.FormatSizeString(vol.Filesize), prevfilename, tempset.RemovedFileCount);
                                            tr.Rollback();
                                        }
                                        else
                                        {
                                            var lst = db.DropFilesetsFromTable(new[] { filesets[ix].Value }, tr).ToArray();
                                            foreach (var f in lst)
                                            {
                                                db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, tr);
                                            }

                                            tr.Commit();
                                            backend.Put(vol, synchronous: true);
                                            backend.Delete(prevfilename, -1, true);
                                            backend.FlushDbMessages();
                                        }
                                    }
                            }
                        }
                    }

                    currentprogress += (versionprogress / 2);
                    m_result.OperationProgressUpdater.UpdateProgress(currentprogress);
                }

                if (doCompactStep)
                {
                    if (m_result.RewrittenFileLists == 0)
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "SkippingCompacting", "Skipping compacting as no new volumes were written");
                    }
                    else
                    {
                        m_result.OperationProgressUpdater.UpdateProgress(pgoffset + (0.75f * pgspan));
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Compact);
                        m_result.CompactResults = new CompactResults(m_result);
                        using (var cdb = new Database.LocalDeleteDatabase(db))
                        {
                            var tr = cdb.BeginTransaction();
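                            // Best-effort transaction handling: roll back on failure and
                            // always attempt a final commit; both are wrapped because the
                            // transaction may already have completed.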
                            try
                            {
                                new CompactHandler(backend.BackendUrl, m_options, (CompactResults)m_result.CompactResults).DoCompact(cdb, true, ref tr, backend);
                            }
                            catch
                            {
                                try { tr.Rollback(); }
                                catch { }
                            }
                            finally
                            {
                                try { tr.Commit(); }
                                catch { }
                            }
                        }
                    }

                    m_result.OperationProgressUpdater.UpdateProgress(pgoffset + pgspan);
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Complete);
                }

                backend.WaitForComplete(db, null);
            }
        }
Example #11
        public void DoRun(Database.LocalDeleteDatabase db, ref System.Data.IDbTransaction transaction, bool hasVerifiedBackend, bool forceCompact, BackendManager sharedManager)
        {
            // Workaround: allow an already-running BackendManager to be reused
            using (var bk = sharedManager == null ? new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db) : null)
            {
                var backend = bk ?? sharedManager;

                if (!hasVerifiedBackend && !m_options.NoBackendverification)
                {
                    FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter);
                }

                var filesetNumbers = db.FilesetTimes.Zip(Enumerable.Range(0, db.FilesetTimes.Count()), (a, b) => new Tuple <long, DateTime>(b, a.Value)).ToList();
                var sets           = db.FilesetTimes.Select(x => x.Value).ToArray();
                var toDelete       = GetFilesetsToDelete(db, sets);

                if (!m_options.AllowFullRemoval && sets.Length == toDelete.Length)
                {
                    Logging.Log.WriteInformationMessage(LOGTAG, "PreventingLastFilesetRemoval", "Preventing removal of last fileset, use --{0} to allow removal ...", "allow-full-removal");
                    toDelete = toDelete.Skip(1).ToArray();
                }

                if (toDelete != null && toDelete.Length > 0)
                {
                    Logging.Log.WriteInformationMessage(LOGTAG, "DeleteRemoteFileset", "Deleting {0} remote fileset(s) ...", toDelete.Length);
                }

                var lst = db.DropFilesetsFromTable(toDelete, transaction).ToArray();
                foreach (var f in lst)
                {
                    db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, transaction);
                }

                if (!m_options.Dryrun)
                {
                    transaction.Commit();
                    transaction = db.BeginTransaction();
                }

                foreach (var f in lst)
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(db, transaction);
                        return;
                    }

                    if (!m_options.Dryrun)
                    {
                        backend.Delete(f.Key, f.Value);
                    }
                    else
                    {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteRemoteFileset", "Would delete remote fileset: {0}", f.Key);
                    }
                }

                if (sharedManager == null)
                {
                    backend.WaitForComplete(db, transaction);
                }
                else
                {
                    backend.WaitForEmpty(db, transaction);
                }

                var count = lst.Length;
                if (!m_options.Dryrun)
                {
                    if (count == 0)
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "DeleteResults", "No remote filesets were deleted");
                    }
                    else
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "DeleteResults", "Deleted {0} remote fileset(s)", count);
                    }
                }
                else
                {
                    if (count == 0)
                    {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteResults", "No remote filesets would be deleted");
                    }
                    else
                    {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteResults", "{0} remote fileset(s) would be deleted", count);
                    }

                    if (count > 0)
                    {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteHelp", "Remove --dry-run to actually delete files");
                    }
                }

                if (!m_options.NoAutoCompact && (forceCompact || (toDelete != null && toDelete.Length > 0)))
                {
                    m_result.CompactResults = new CompactResults(m_result);
                    new CompactHandler(m_backendurl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, ref transaction, sharedManager);
                }

                m_result.SetResults(
                    from n in filesetNumbers
                    where toDelete.Contains(n.Item2)
                    select n,
                    m_options.Dryrun);
            }
        }
Example #12
        private void DoRun(LocalDatabase dbparent, Library.Utility.IFilter filter, RestoreResults result)
        {
            // First we check that the remote storage is consistent with the database.
            // We can then query the database to find the blocks needed for the restore.
            using (var database = new LocalRestoreDatabase(dbparent, m_options.Blocksize))
                using (var backend = new BackendManager(m_backendurl, m_options, result.BackendWriter, database))
                {
                    database.SetResult(m_result);
                    Utility.VerifyParameters(database, m_options);

                    var blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
                    var filehasher  = System.Security.Cryptography.HashAlgorithm.Create(m_options.FileHashAlgorithm);
                    if (blockhasher == null)
                    {
                        throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.BlockHashAlgorithm));
                    }
                    if (!blockhasher.CanReuseTransform)
                    {
                        throw new Exception(string.Format(Strings.Foresthash.InvalidCryptoSystem, m_options.BlockHashAlgorithm));
                    }

                    if (filehasher == null)
                    {
                        throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.FileHashAlgorithm));
                    }
                    if (!filehasher.CanReuseTransform)
                    {
                        throw new Exception(string.Format(Strings.Foresthash.InvalidCryptoSystem, m_options.FileHashAlgorithm));
                    }

                    if (!m_options.NoBackendverification)
                    {
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PreRestoreVerify);
                        FilelistProcessor.VerifyRemoteList(backend, m_options, database, result.BackendWriter);
                    }

                    //Figure out what files are to be patched, and what blocks are needed
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_CreateFileList);
                    using (new Logging.Timer("PrepareBlockList"))
                        PrepareBlockAndFileList(database, m_options, filter, result);

                    //Make the entire output setup
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_CreateTargetFolders);
                    using (new Logging.Timer("CreateDirectory"))
                        CreateDirectoryStructure(database, m_options, result);

                    //If we are patching an existing target folder, do not touch stuff that is already updated
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_ScanForExistingFiles);
                    using (new Logging.Timer("ScanForexistingTargetBlocks"))
                        ScanForExistingTargetBlocks(database, m_blockbuffer, blockhasher, filehasher, m_options, result);

                    //Look for existing blocks in the original source files only
                    using (new Logging.Timer("ScanForExistingSourceBlocksFast"))
#if DEBUG
                        if (!m_options.NoLocalBlocks && !string.IsNullOrEmpty(m_options.Restorepath))
#else
                        if (!string.IsNullOrEmpty(m_options.Restorepath))
#endif
                        {
                            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_ScanForLocalBlocks);
                            ScanForExistingSourceBlocksFast(database, m_options, m_blockbuffer, blockhasher, result);
                        }

                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(database, null);
                        return;
                    }

                    // If other local files already have the blocks we want, we use them instead of downloading
                    if (m_options.PatchWithLocalBlocks)
                    {
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PatchWithLocalBlocks);
                        using (new Logging.Timer("PatchWithLocalBlocks"))
                            ScanForExistingSourceBlocks(database, m_options, m_blockbuffer, blockhasher, result);
                    }

                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(database, null);
                        return;
                    }

                    // Fill BLOCKS with remote sources
                    var volumes = database.GetMissingVolumes().ToList();

                    if (volumes.Count > 0)
                    {
                        m_result.AddMessage(string.Format("{0} remote files are required to restore", volumes.Count));
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_DownloadingRemoteFiles);
                    }

                    var brokenFiles = new List <string>();
                    foreach (var blockvolume in new AsyncDownloader(volumes, backend))
                    {
                        try
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(database, null);
                                return;
                            }

                            using (var tmpfile = blockvolume.TempFile)
                                using (var blocks = new BlockVolumeReader(GetCompressionModule(blockvolume.Name), tmpfile, m_options))
                                    PatchWithBlocklist(database, blocks, m_options, result, m_blockbuffer);
                        }
                        catch (Exception ex)
                        {
                            brokenFiles.Add(blockvolume.Name);
                            result.AddError(string.Format("Failed to patch with remote file: \"{0}\", message: {1}", blockvolume.Name, ex.Message), ex);
                            if (ex is System.Threading.ThreadAbortException)
                            {
                                throw;
                            }
                        }
                    }

                    // Reset the filehasher if it was used to verify existing files
                    filehasher.Initialize();

                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        return;
                    }

                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PostRestoreVerify);

                    var fileErrors = 0L;
                    // After all blocks in the files are restored, verify the file hash
                    using (new Logging.Timer("RestoreVerification"))
                        foreach (var file in database.GetFilesToRestore())
                        {
                            try
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(database, null);
                                    return;
                                }

                                result.AddVerboseMessage("Testing restored file integrity: {0}", file.Path);

                                string key;
                                long   size;
                                using (var fs = m_systemIO.FileOpenRead(file.Path))
                                {
                                    size = fs.Length;
                                    key  = Convert.ToBase64String(filehasher.ComputeHash(fs));
                                }

                                if (key != file.Hash)
                                {
                                    throw new Exception(string.Format("Failed to restore file: \"{0}\". File hash is {1}, expected hash is {2}", file.Path, key, file.Hash));
                                }
                                result.FilesRestored++;
                                result.SizeOfRestoredFiles += size;
                            }
                            catch (Exception ex)
                            {
                                fileErrors++;
                                result.AddWarning(ex.Message, ex);
                                if (ex is System.Threading.ThreadAbortException)
                                {
                                    throw;
                                }
                            }
                        }

                    if (fileErrors > 0 && brokenFiles.Count > 0)
                    {
                        m_result.AddMessage(string.Format("Failed to restore {0} files, additionally the following files failed to download, which may be the cause:{1}", fileErrors, Environment.NewLine, string.Join(Environment.NewLine, brokenFiles)));
                    }

                    // Drop the temp tables
                    database.DropRestoreTable();
                    backend.WaitForComplete(database, null);
                }

            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_Complete);
            result.EndTime = DateTime.UtcNow;
        }