Inheritance: MonoBehaviour
Example #1
        /// <summary>
        /// Helper method that verifies uploaded volumes and updates their state in the database.
        /// Throws an error if there are issues with the remote storage
        /// </summary>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        /// <param name="log">The log instance to use</param>
        public static void VerifyLocalList(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
        {
            var locallist = database.GetRemoteVolumes();
            foreach(var i in locallist)
            {
                switch (i.State)
                {
                    case RemoteVolumeState.Uploaded:
                    case RemoteVolumeState.Verified:
                    case RemoteVolumeState.Deleted:
                        break;

                    case RemoteVolumeState.Temporary:
                    case RemoteVolumeState.Deleting:
                    case RemoteVolumeState.Uploading:
                        log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                        try
                        {
                            backend.Delete(i.Name, i.Size, true);
                        }
                        catch (Exception ex)
                        {
                            log.AddWarning(string.Format("Failed to erase file {0}, treating as deleted: {1}", i.Name, ex.Message), ex);
                        }

                        break;

                    default:
                        log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null);
                        break;
                }

                backend.FlushDbMessages();
            }
        }
 private void Awake() {
     backendManager = GetComponent<BackendManager>();
     if (backendManager == null) {
         Debug.LogWarning("BackendManager not found, disabling menu.");
         enabled = false;
     }
 }
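
On Unity 2019.2 and newer, TryGetComponent expresses the same guard without a null comparison. A minimal alternative sketch of the Awake above, assuming the same backendManager field:

    private void Awake() {
        // TryGetComponent returns false instead of handing back null,
        // so the warn-and-disable path is unchanged.
        if (!TryGetComponent(out backendManager)) {
            Debug.LogWarning("BackendManager not found, disabling menu.");
            enabled = false;
        }
    }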
     public void Run(IEnumerable<string> filterstrings = null, Library.Utility.IFilter compositefilter = null)
     {
         using (var tmpdb = new Library.Utility.TempFile())
         using (var db = new Database.LocalDatabase(System.IO.File.Exists(m_options.Dbpath) ? m_options.Dbpath : (string)tmpdb, "ListControlFiles"))
         using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
         {
             m_result.SetDatabase(db);
             
             var filter = Library.Utility.JoinedFilterExpression.Join(new Library.Utility.FilterExpression(filterstrings), compositefilter);
             
             try
             {
                 var filteredList = ListFilesHandler.ParseAndFilterFilesets(backend.List(), m_options);
                 if (filteredList.Count == 0)
                     throw new Exception("No filesets found on remote target");
 
                 Exception lastEx = new Exception("No suitable files found on remote target");
 
                 foreach(var fileversion in filteredList)
                     try
                     {
                         if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                             return;
                     
                         var file = fileversion.Value.File;
                         long size;
                         string hash;
                         RemoteVolumeType type;
                         RemoteVolumeState state;
                         if (!db.GetRemoteVolume(file.Name, out hash, out size, out type, out state))
                             size = file.Size;
 
                         var files = new List<Library.Interface.IListResultFile>();
                         using (var tmpfile = backend.Get(file.Name, size, hash))
                         using (var tmp = new Volumes.FilesetVolumeReader(RestoreHandler.GetCompressionModule(file.Name), tmpfile, m_options))
                             foreach (var cf in tmp.ControlFiles)
                                 if (Library.Utility.FilterExpression.Matches(filter, cf.Key))
                                     files.Add(new ListResultFile(cf.Key, null));
                         
                         m_result.SetResult(new Library.Interface.IListResultFileset[] { new ListResultFileset(fileversion.Key, fileversion.Value.Time, -1, -1) }, files);
                         lastEx = null;
                         break;
                     }
                     catch(Exception ex)
                     {
                         lastEx = ex;
                         if (ex is System.Threading.ThreadAbortException)
                             throw;
                     }
 
                 if (lastEx != null)
                     throw lastEx;
             }
             finally
             {
                 backend.WaitForComplete(db, null);
             }
         }
      }
  }
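
The loop above tries each fileset version in turn, remembers the last failure, and rethrows only if no version yields a result. The pattern can be factored out; a minimal standalone sketch (the helper and its names are illustrative, not part of the codebase):

    using System;
    using System.Collections.Generic;

    static class FirstSuccess
    {
        // Try candidates in order; return the first successful result,
        // otherwise rethrow the most recent failure.
        public static TResult Run<TSource, TResult>(IEnumerable<TSource> candidates, Func<TSource, TResult> attempt)
        {
            Exception lastEx = new Exception("No suitable candidates found");
            foreach (var c in candidates)
            {
                try { return attempt(c); }
                catch (Exception ex) { lastEx = ex; }
            }
            throw lastEx;
        }
    }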
Example #4
        /// <summary>
        /// Helper method that verifies uploaded volumes and updates their state in the database.
        /// Throws an error if there are issues with the remote storage
        /// </summary>
        /// <param name="backend">The backend instance to use</param>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        /// <param name="log">The log instance to use</param>
        public static void VerifyRemoteList(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
        {
            var tp = RemoteListAnalysis(backend, options, database, log);
            long extraCount = 0;
            long missingCount = 0;

            foreach(var n in tp.ExtraVolumes)
            {
                log.AddWarning(string.Format("Extra unknown file: {0}", n.File.Name), null);
                extraCount++;
            }

            foreach(var n in tp.MissingVolumes)
            {
                log.AddWarning(string.Format("Missing file: {0}", n.Name), null);
                missingCount++;
            }

            if (extraCount > 0)
            {
                var s = string.Format("Found {0} remote files that are not recorded in local storage, please run repair", extraCount);
                log.AddError(s, null);
                throw new Exception(s);
            }

            var lookup = new Dictionary<string, string>();
            var doubles = new Dictionary<string, string>();
            foreach(var v in tp.ParsedVolumes)
            {
                if (lookup.ContainsKey(v.File.Name))
                    doubles[v.File.Name] = null;
                else
                    lookup[v.File.Name] = null;
            }

            if (doubles.Count > 0)
            {
                var s = string.Format("Found remote files reported as duplicates, either the backend module is broken or you need to manually remove the extra copies.\nThe following files were found multiple times: {0}", string.Join(", ", doubles.Keys));
                log.AddError(s, null);
                throw new Exception(s);
            }

            if (missingCount > 0)
            {
                string s;
                if (!tp.BackupPrefixes.Contains(options.Prefix) && tp.BackupPrefixes.Length > 0)
                    s = string.Format("Found {0} files that are missing from the remote storage, and no files with the backup prefix {1}, but found the following backup prefixes: {2}", missingCount, options.Prefix, string.Join(", ", tp.BackupPrefixes));
                else
                    s = string.Format("Found {0} files that are missing from the remote storage, please run repair", missingCount);

                log.AddError(s, null);
                throw new Exception(s);
            }
        }
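
The duplicate scan above uses two Dictionary<string, string> instances with null values as makeshift sets. HashSet<string> states the intent directly; an equivalent sketch, where volumeNames stands in for the names taken from tp.ParsedVolumes:

    var seen = new HashSet<string>();
    var doubles = new HashSet<string>();
    foreach (var name in volumeNames)
    {
        // HashSet<T>.Add returns false when the item is already present.
        if (!seen.Add(name))
            doubles.Add(name);
    }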
Example #5
 public void Run(long samples)
 {
     if (!System.IO.File.Exists(m_options.Dbpath))
         throw new Exception(string.Format("Database file does not exist: {0}", m_options.Dbpath));
                         
     using(var db = new LocalTestDatabase(m_options.Dbpath))
     using(var backend = new BackendManager(m_backendurl, m_options, m_results.BackendWriter, db))
     {
         db.SetResult(m_results);
         Utility.VerifyParameters(db, m_options);
         
         if (!m_options.NoBackendverification)
             FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_results.BackendWriter);
             
         DoRun(samples, db, backend);
         db.WriteResults();
     }
 }
Example #6
        /// <summary>
        /// Helper method that verifies uploaded volumes and updates their state in the database.
        /// Throws an error if there are issues with the remote storage
        /// </summary>
        /// <param name="backend">The backend instance to use</param>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        public static void VerifyRemoteList(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
        {
            var tp = RemoteListAnalysis(backend, options, database, log);
            long extraCount = 0;
            long missingCount = 0;

            foreach(var n in tp.ExtraVolumes)
            {
                log.AddWarning(string.Format("Extra unknown file: {0}", n.File.Name), null);
                extraCount++;
            }

            foreach(var n in tp.MissingVolumes)
            {
                log.AddWarning(string.Format("Missing file: {0}", n.Name), null);
                missingCount++;
            }

            if (extraCount > 0)
            {
                var s = string.Format("Found {0} remote files that are not recorded in local storage, please run repair", extraCount);
                log.AddError(s, null);
                throw new Exception(s);
            }

            if (missingCount > 0)
            {
                string s;
                if (!tp.BackupPrefixes.Contains(options.Prefix) && tp.BackupPrefixes.Length > 0)
                    s = string.Format("Found {0} files that are missing from the remote storage, and no files with the backup prefix {1}, but found the following backup prefixes: {2}", missingCount, options.Prefix, string.Join(", ", tp.BackupPrefixes));
                else
                    s = string.Format("Found {0} files that are missing from the remote storage, please run repair", missingCount);

                log.AddError(s, null);
                throw new Exception(s);
            }
        }
Example #7
        public void Run(long samples)
        {
            if (!System.IO.File.Exists(m_options.Dbpath))
            {
                throw new UserInformationException(string.Format("Database file does not exist: {0}", m_options.Dbpath), "DatabaseDoesNotExist");
            }

            using (var db = new LocalTestDatabase(m_options.Dbpath))
                using (var backend = new BackendManager(m_backendurl, m_options, m_results.BackendWriter, db))
                {
                    db.SetResult(m_results);
                    Utility.UpdateOptionsFromDb(db, m_options);
                    Utility.VerifyParameters(db, m_options);
                    db.VerifyConsistency(m_options.Blocksize, m_options.BlockhashSize, true, null);

                    if (!m_options.NoBackendverification)
                    {
                        FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_results.BackendWriter);
                    }

                    DoRun(samples, db, backend);
                    db.WriteResults();
                }
        }
    // Start is called before the first frame update
    void Start()
    {
        // singleton
        if (instance == null)
        {
            instance = this;
            DontDestroyOnLoad(gameObject);
        }
        else
        {
            // Destroy the duplicate object, not the already-registered singleton.
            Destroy(gameObject);
        }

#if UNITY_EDITOR
        // Ready to start
        LogMonitor.instance.AddLog("Platform : UNITY_EDITOR");
        tcm.ReadyToStart();
#elif UNITY_ANDROID
        LogMonitor.instance.AddLog("Playform : UNITY_ANDROID");

        GPGSInit();
        BackendInit();
#endif
    }
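
The Start method above is the usual hand-rolled Unity singleton. The same idea is often lifted into a reusable base class; a minimal generic sketch (not part of this project):

    using UnityEngine;

    // Derive as `public class GameManager : Singleton<GameManager>`.
    public abstract class Singleton<T> : MonoBehaviour where T : MonoBehaviour
    {
        public static T instance { get; private set; }

        protected virtual void Awake()
        {
            if (instance == null)
            {
                instance = this as T;
                DontDestroyOnLoad(gameObject);   // persist across scene loads
            }
            else if (instance != this)
            {
                Destroy(gameObject);             // discard the duplicate
            }
        }
    }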
Example #9
        private void PostBackupVerification()
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupVerify);
            using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
            {
                using (new Logging.Timer(LOGTAG, "AfterBackupVerify", "AfterBackupVerify"))
                    FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter);
                backend.WaitForComplete(m_database, null);
            }

            long remoteVolumeCount = m_database.GetRemoteVolumes().LongCount(x => x.State == RemoteVolumeState.Verified);
            long samplesToTest     = Math.Max(m_options.BackupTestSampleCount, (long)Math.Round(remoteVolumeCount * (m_options.BackupTestPercentage / 100D), MidpointRounding.AwayFromZero));

            if (samplesToTest > 0 && remoteVolumeCount > 0)
            {
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupTest);
                m_result.TestResults = new TestResults(m_result);

                using (var testdb = new LocalTestDatabase(m_database))
                    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, testdb))
                        new TestHandler(m_backendurl, m_options, (TestResults)m_result.TestResults)
                        .DoRun(samplesToTest, testdb, backend);
            }
        }
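
samplesToTest above is the larger of the configured fixed sample count and the configured percentage of verified remote volumes, rounded away from zero. A standalone check of the arithmetic with illustrative values:

    long backupTestSampleCount = 1;      // hypothetical fixed sample count
    double backupTestPercentage = 10;    // hypothetical percentage option
    long remoteVolumeCount = 37;         // verified volumes on the backend

    long samplesToTest = Math.Max(
        backupTestSampleCount,
        (long)Math.Round(remoteVolumeCount * (backupTestPercentage / 100D), MidpointRounding.AwayFromZero));
    // 37 * 0.10 = 3.7 -> rounds to 4, so samplesToTest == 4.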
Example #10
        /// <summary>
        /// Helper method that verifies uploaded volumes and updates their state in the database.
        /// Throws an error if there are issues with the remote storage
        /// </summary>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        /// <param name="log">The log instance to use</param>
        public static void VerifyLocalList(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
        {
            var locallist = database.GetRemoteVolumes();

            foreach (var i in locallist)
            {
                switch (i.State)
                {
                case RemoteVolumeState.Uploaded:
                case RemoteVolumeState.Verified:
                case RemoteVolumeState.Deleted:
                    break;

                case RemoteVolumeState.Temporary:
                case RemoteVolumeState.Deleting:
                case RemoteVolumeState.Uploading:
                    log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                    try
                    {
                        backend.Delete(i.Name, i.Size, true);
                    }
                    catch (Exception ex)
                    {
                        log.AddWarning(string.Format("Failed to erase file {0}, treating as deleted: {1}", i.Name, ex.Message), ex);
                    }

                    break;

                default:
                    log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null);
                    break;
                }

                backend.FlushDbMessages();
            }
        }
Example #11
        /// <summary>
        /// Helper method that verifies uploaded volumes and updates their state in the database.
        /// Throws an error if there are issues with the remote storage
        /// </summary>
        /// <param name="database">The database to compare with</param>
        public static void VerifyLocalList(BackendManager backend, LocalDatabase database)
        {
            var locallist = database.GetRemoteVolumes();

            foreach (var i in locallist)
            {
                switch (i.State)
                {
                case RemoteVolumeState.Uploaded:
                case RemoteVolumeState.Verified:
                case RemoteVolumeState.Deleted:
                    break;

                case RemoteVolumeState.Temporary:
                case RemoteVolumeState.Deleting:
                case RemoteVolumeState.Uploading:
                    Logging.Log.WriteInformationMessage(LOGTAG, "RemovingStaleFile", "Removing remote file listed as {0}: {1}", i.State, i.Name);
                    try
                    {
                        backend.Delete(i.Name, i.Size, true);
                    }
                    catch (Exception ex)
                    {
                        Logging.Log.WriteWarningMessage(LOGTAG, "DeleteFileFailed", ex, "Failed to erase file {0}, treating as deleted: {1}", i.Name, ex.Message);
                    }

                    break;

                default:
                    Logging.Log.WriteWarningMessage(LOGTAG, "UnknownFileState", null, "Unknown state for remote file listed as {0}: {1}", i.State, i.Name);
                    break;
                }

                backend.FlushDbMessages();
            }
        }
Example #12
        private IEnumerable <KeyValuePair <string, long> > DoDelete(LocalDeleteDatabase db, BackendManager backend, IEnumerable <IRemoteVolume> deleteableVolumes, ref System.Data.IDbTransaction transaction)
        {
            // Mark all volumes as disposable
            foreach (var f in deleteableVolumes)
            {
                db.UpdateRemoteVolume(f.Name, RemoteVolumeState.Deleting, f.Size, f.Hash, transaction);
            }

            // Before we commit the current state, make sure the backend has caught up
            backend.WaitForEmpty(db, transaction);

            if (!m_options.Dryrun)
            {
                transaction.Commit();
                transaction = db.BeginTransaction();
            }

            return(PerformDelete(backend, db.GetDeletableVolumes(deleteableVolumes, transaction)));
        }
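
The commit-then-BeginTransaction step above checkpoints the database while the surrounding operation keeps running inside a transaction. In plain ADO.NET the pattern looks like this; a minimal sketch with an illustrative helper name:

    using System.Data;

    static class TransactionCheckpoint
    {
        // Make the work so far durable, then hand back a fresh transaction
        // so the remaining work stays atomic.
        public static IDbTransaction Checkpoint(IDbConnection connection, IDbTransaction transaction)
        {
            transaction.Commit();
            transaction.Dispose();
            return connection.BeginTransaction();
        }
    }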
Example #13
        /// <summary>
        /// Helper method that compares the remote storage file list with the local database,
        /// updates volume states, and collects extra and missing volumes for the caller.
        /// </summary>
        /// <param name="backend">The backend instance to use</param>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
        {
            var rawlist = backend.List();
            var lookup = new Dictionary<string, Volumes.IParsedVolume>();

            var remotelist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p != null select p).ToList();
            var unknownlist = (from n in rawlist let p = Volumes.VolumeBase.ParseFilename(n) where p == null select n).ToList();
            var filesets = (from n in remotelist where n.FileType == RemoteVolumeType.Files orderby n.Time descending select n).ToList();

            log.KnownFileCount = remotelist.Count();
            log.KnownFileSize = remotelist.Select(x => x.File.Size).Sum();
            log.UnknownFileCount = unknownlist.Count();
            log.UnknownFileSize = unknownlist.Select(x => x.Size).Sum();
            log.BackupListCount = filesets.Count;
            log.LastBackupDate = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();

            if (backend is Library.Interface.IQuotaEnabledBackend)
            {
                log.TotalQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).TotalQuotaSpace;
                log.FreeQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).FreeQuotaSpace;
            }

            log.AssignedQuotaSpace = options.QuotaSize;

            foreach (var s in remotelist)
                if (s.Prefix == options.Prefix)
                    lookup[s.File.Name] = s;

            var missing = new List<RemoteVolumeEntry>();
            var locallist = database.GetRemoteVolumes();
            foreach (var i in locallist)
            {
                //Ignore those that are deleted
                if (i.State == RemoteVolumeState.Deleted)
                    continue;

                if (i.State == RemoteVolumeState.Temporary)
                {
                    log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
                    database.RemoveRemoteVolume(i.Name, null);
                }
                else if (i.State == RemoteVolumeState.Deleting && lookup.ContainsKey(i.Name))
                {
                    log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                    backend.Delete(i.Name, i.Size, true);
                    lookup.Remove(i.Name);
                }
                else
                {
                    Volumes.IParsedVolume r;
                    if (!lookup.TryGetValue(i.Name, out r))
                    {
                        // r is null here (TryGetValue failed), so the size-mismatch clause never applies.
                        if (i.State == RemoteVolumeState.Uploading || i.State == RemoteVolumeState.Deleting || (r != null && r.File.Size != i.Size && r.File.Size >= 0 && i.Size >= 0))
                        {
                            log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
                            database.RemoveRemoteVolume(i.Name, null);
                        }
                        else
                            missing.Add(i);
                    }
                    else if (i.State != RemoteVolumeState.Verified)
                    {
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash);
                    }

                    lookup.Remove(i.Name);
                }
            }

            return new RemoteAnalysisResult() { ParsedVolumes = remotelist, ExtraVolumes = lookup.Values, MissingVolumes = missing };
        }
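
RemoteListAnalysis enumerates rawlist twice, parsing every name once per query. Parsing each name a single time and splitting on the result is equivalent; a minimal sketch, with ParseFilename standing in for Volumes.VolumeBase.ParseFilename:

    var parsed = rawlist
        .Select(n => new { Entry = n, Volume = ParseFilename(n) })
        .ToList();                                   // parse each name exactly once
    var remotelist = parsed.Where(x => x.Volume != null).Select(x => x.Volume).ToList();
    var unknownlist = parsed.Where(x => x.Volume == null).Select(x => x.Entry).ToList();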
Example #14
        private void PostBackupVerification()
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupVerify);
            using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
            {
                using(new Logging.Timer("AfterBackupVerify"))
                    FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter);
                backend.WaitForComplete(m_database, null);
            }

            if (m_options.BackupTestSampleCount > 0 && m_database.GetRemoteVolumes().Count() > 0)
            {
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupTest);
                m_result.TestResults = new TestResults(m_result);

                using(var testdb = new LocalTestDatabase(m_database))
                using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, testdb))
                    // Reuse the TestResults instance stored on m_result so the test outcome is reported.
                    new TestHandler(m_backendurl, m_options, (TestResults)m_result.TestResults)
                        .DoRun(m_options.BackupTestSampleCount, testdb, backend);
            }
        }
Example #15
        /// <summary>
        /// Run the recreate procedure
        /// </summary>
        /// <param name="dbparent">The database to restore into</param>
        /// <param name="updating">True if this is an update call, false otherwise</param>
        /// <param name="filter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param>
        /// <param name="filelistfilter">Filters the files in a filelist to prevent downloading unwanted data</param>
        /// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be use to recover data blocks while processing blocklists</param>
        internal void DoRun(LocalDatabase dbparent, bool updating, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Recreate_Running);

            //We build a local database in steps.
            using (var restoredb = new LocalRecreateDatabase(dbparent, m_options))
                using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, restoredb))
                {
                    restoredb.RepairInProgress = true;

                    var volumeIds = new Dictionary <string, long>();

                    var rawlist = backend.List();

                    //First step is to examine the remote storage to see what
                    // kind of data we can find
                    var remotefiles =
                        (from x in rawlist
                         let n = VolumeBase.ParseFilename(x)
                                 where
                                 n != null
                                 &&
                                 n.Prefix == m_options.Prefix
                                 select n).ToArray(); //ToArray() ensures that we do not remote-request it multiple times

                    if (remotefiles.Length == 0)
                    {
                        if (rawlist.Count == 0)
                        {
                            throw new UserInformationException("No files were found at the remote location, perhaps the target url is incorrect?", "EmptyRemoteLocation");
                        }
                        else
                        {
                            var tmp =
                                (from x in rawlist
                                 let n = VolumeBase.ParseFilename(x)
                                         where
                                         n != null
                                         select n.Prefix).ToArray();

                            var types = tmp.Distinct().ToArray();
                            if (tmp.Length == 0)
                            {
                                throw new UserInformationException(string.Format("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count), "EmptyRemoteLocation");
                            }
                            else if (types.Length == 1)
                            {
                                throw new UserInformationException(string.Format("Found {0} parse-able files with the prefix {1}, did you forget to set the backup prefix?", tmp.Length, types[0]), "EmptyRemoteLocationWithPrefix");
                            }
                            else
                            {
                                throw new UserInformationException(string.Format("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup prefix?", tmp.Length, rawlist.Count, string.Join(", ", types)), "EmptyRemoteLocationWithPrefix");
                            }
                        }
                    }

                    //Then we select the filelist we should work with,
                    // and create the filelist table to fit
                    IEnumerable <IParsedVolume> filelists =
                        from n in remotefiles
                        where n.FileType == RemoteVolumeType.Files
                        orderby n.Time descending
                        select n;

                    if (filelists.Count() <= 0)
                    {
                        throw new UserInformationException(string.Format("No filelists found on the remote destination"), "EmptyRemoteLocation");
                    }

                    if (filelistfilter != null)
                    {
                        filelists = filelistfilter(filelists).Select(x => x.Value).ToArray();
                    }

                    if (filelists.Count() <= 0)
                    {
                        throw new UserInformationException(string.Format("No filelists"), "NoMatchingRemoteFilelists");
                    }

                    // If we are updating, all files should be accounted for
                    foreach (var fl in remotefiles)
                    {
                        volumeIds[fl.File.Name] = updating ? restoredb.GetRemoteVolumeID(fl.File.Name) : restoredb.RegisterRemoteVolume(fl.File.Name, fl.FileType, fl.File.Size, RemoteVolumeState.Uploaded);
                    }

                    var hasUpdatedOptions = false;

                    if (updating)
                    {
                        Utility.UpdateOptionsFromDb(restoredb, m_options);
                        Utility.VerifyParameters(restoredb, m_options);
                    }

                    //Record all blocksets and files needed
                    using (var tr = restoredb.BeginTransaction())
                    {
                        var filelistWork = (from n in filelists orderby n.Time select new RemoteVolume(n.File) as IRemoteVolume).ToList();
                        Logging.Log.WriteInformationMessage(LOGTAG, "RebuildStarted", "Rebuild database started, downloading {0} filelists", filelistWork.Count);

                        var progress = 0;

                        // Register the files we are working with, if not already updated
                        if (updating)
                        {
                            foreach (var n in filelists)
                            {
                                if (volumeIds[n.File.Name] == -1)
                                {
                                    volumeIds[n.File.Name] = restoredb.RegisterRemoteVolume(n.File.Name, n.FileType, RemoteVolumeState.Uploaded, n.File.Size, new TimeSpan(0), tr);
                                }
                            }
                        }

                        var isFirstFilelist = true;
                        var blocksize       = m_options.Blocksize;
                        var hashes_pr_block = blocksize / m_options.BlockhashSize;

                        foreach (var entry in new AsyncDownloader(filelistWork, backend))
                        {
                            try
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(restoredb, null);
                                    m_result.EndTime = DateTime.UtcNow;
                                    return;
                                }

                                progress++;
                                if (filelistWork.Count == 1 && m_options.RepairOnlyPaths)
                                {
                                    m_result.OperationProgressUpdater.UpdateProgress(0.5f);
                                }
                                else
                                {
                                    m_result.OperationProgressUpdater.UpdateProgress(((float)progress / filelistWork.Count()) * (m_options.RepairOnlyPaths ? 1f : 0.2f));
                                }

                                using (var tmpfile = entry.TempFile)
                                {
                                    isFirstFilelist = false;

                                    if (entry.Hash != null && entry.Size > 0)
                                    {
                                        restoredb.UpdateRemoteVolume(entry.Name, RemoteVolumeState.Verified, entry.Size, entry.Hash, tr);
                                    }

                                    var parsed = VolumeBase.ParseFilename(entry.Name);

                                    if (!hasUpdatedOptions && !updating)
                                    {
                                        VolumeReaderBase.UpdateOptionsFromManifest(parsed.CompressionModule, tmpfile, m_options);
                                        hasUpdatedOptions = true;
                                        // Recompute the cached sizes
                                        blocksize       = m_options.Blocksize;
                                        hashes_pr_block = blocksize / m_options.BlockhashSize;
                                    }


                                    // Create timestamped operations based on the file timestamp
                                    var filesetid = restoredb.CreateFileset(volumeIds[entry.Name], parsed.Time, tr);
                                    using (var filelistreader = new FilesetVolumeReader(parsed.CompressionModule, tmpfile, m_options))
                                        foreach (var fe in filelistreader.Files.Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path)))
                                        {
                                            try
                                            {
                                                var expectedmetablocks          = (fe.Metasize + blocksize - 1) / blocksize;
                                                var expectedmetablocklisthashes = (expectedmetablocks + hashes_pr_block - 1) / hashes_pr_block;
                                                if (expectedmetablocks <= 1)
                                                {
                                                    expectedmetablocklisthashes = 0;
                                                }

                                                var metadataid = long.MinValue;
                                                switch (fe.Type)
                                                {
                                                case FilelistEntryType.Folder:
                                                    metadataid = restoredb.AddMetadataset(fe.Metahash, fe.Metasize, fe.MetaBlocklistHashes, expectedmetablocklisthashes, tr);
                                                    restoredb.AddDirectoryEntry(filesetid, fe.Path, fe.Time, metadataid, tr);
                                                    break;

                                                case FilelistEntryType.File:
                                                    var expectedblocks          = (fe.Size + blocksize - 1) / blocksize;
                                                    var expectedblocklisthashes = (expectedblocks + hashes_pr_block - 1) / hashes_pr_block;
                                                    if (expectedblocks <= 1)
                                                    {
                                                        expectedblocklisthashes = 0;
                                                    }

                                                    var blocksetid = restoredb.AddBlockset(fe.Hash, fe.Size, fe.BlocklistHashes, expectedblocklisthashes, tr);
                                                    metadataid = restoredb.AddMetadataset(fe.Metahash, fe.Metasize, fe.MetaBlocklistHashes, expectedmetablocklisthashes, tr);
                                                    restoredb.AddFileEntry(filesetid, fe.Path, fe.Time, blocksetid, metadataid, tr);

                                                    if (fe.Size <= blocksize)
                                                    {
                                                        if (!string.IsNullOrWhiteSpace(fe.Blockhash))
                                                        {
                                                            restoredb.AddSmallBlocksetLink(fe.Hash, fe.Blockhash, fe.Blocksize, tr);
                                                        }
                                                        else if (m_options.BlockHashAlgorithm == m_options.FileHashAlgorithm)
                                                        {
                                                            restoredb.AddSmallBlocksetLink(fe.Hash, fe.Hash, fe.Size, tr);
                                                        }
                                                        else
                                                        {
                                                            Logging.Log.WriteWarningMessage(LOGTAG, "MissingBlockHash", null, "No block hash found for file: {0}", fe.Path);
                                                        }
                                                    }

                                                    break;

                                                case FilelistEntryType.Symlink:
                                                    metadataid = restoredb.AddMetadataset(fe.Metahash, fe.Metasize, fe.MetaBlocklistHashes, expectedmetablocklisthashes, tr);
                                                    restoredb.AddSymlinkEntry(filesetid, fe.Path, fe.Time, metadataid, tr);
                                                    break;

                                                default:
                                                    Logging.Log.WriteWarningMessage(LOGTAG, "SkippingUnknownFileEntry", null, "Skipping file-entry with unknown type {0}: {1} ", fe.Type, fe.Path);
                                                    break;
                                                }

                                                if (fe.Metasize <= blocksize && (fe.Type == FilelistEntryType.Folder || fe.Type == FilelistEntryType.File || fe.Type == FilelistEntryType.Symlink))
                                                {
                                                    if (!string.IsNullOrWhiteSpace(fe.Metablockhash))
                                                    {
                                                        restoredb.AddSmallBlocksetLink(fe.Metahash, fe.Metablockhash, fe.Metasize, tr);
                                                    }
                                                    else if (m_options.BlockHashAlgorithm == m_options.FileHashAlgorithm)
                                                    {
                                                        restoredb.AddSmallBlocksetLink(fe.Metahash, fe.Metahash, fe.Metasize, tr);
                                                    }
                                                    else
                                                    {
                                                        Logging.Log.WriteWarningMessage(LOGTAG, "MissingMetadataBlockHash", null, "No block hash found for file metadata: {0}", fe.Path);
                                                    }
                                                }
                                            }
                                            catch (Exception ex)
                                            {
                                                Logging.Log.WriteWarningMessage(LOGTAG, "FileEntryProcessingFailed", ex, "Failed to process file-entry: {0}", fe.Path);
                                            }
                                        }
                                }
                            }
                            catch (Exception ex)
                            {
                                Logging.Log.WriteWarningMessage(LOGTAG, "FileProcessingFailed", ex, "Failed to process file: {0}", entry.Name);
                                if (ex is System.Threading.ThreadAbortException)
                                {
                                    m_result.EndTime = DateTime.UtcNow;
                                    throw;
                                }

                                if (isFirstFilelist && ex is System.Security.Cryptography.CryptographicException)
                                {
                                    m_result.EndTime = DateTime.UtcNow;
                                    throw;
                                }
                            }
                        }

                        //Make sure we write the config
                        if (!updating)
                        {
                            Utility.VerifyParameters(restoredb, m_options, tr);
                        }

                        using (new Logging.Timer(LOGTAG, "CommitUpdateFilesetFromRemote", "CommitUpdateFilesetFromRemote"))
                            tr.Commit();
                    }

                    if (!m_options.RepairOnlyPaths)
                    {
                        var hashalg = Library.Utility.HashAlgorithmHelper.Create(m_options.BlockHashAlgorithm);
                        if (hashalg == null)
                        {
                            throw new UserInformationException(Strings.Common.InvalidHashAlgorithm(m_options.BlockHashAlgorithm), "BlockHashAlgorithmNotSupported");
                        }
                        var hashsize = hashalg.HashSize / 8;

                        //Grab all index files, and update the block table
                        using (var tr = restoredb.BeginTransaction())
                        {
                            var indexfiles = (
                                from n in remotefiles
                                where n.FileType == RemoteVolumeType.Index
                                select new RemoteVolume(n.File) as IRemoteVolume).ToList();

                            Logging.Log.WriteInformationMessage(LOGTAG, "FilelistsRestored", "Filelists restored, downloading {0} index files", indexfiles.Count);

                            var progress = 0;

                            foreach (var sf in new AsyncDownloader(indexfiles, backend))
                            {
                                try
                                {
                                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                    {
                                        backend.WaitForComplete(restoredb, null);
                                        m_result.EndTime = DateTime.UtcNow;
                                        return;
                                    }

                                    progress++;
                                    m_result.OperationProgressUpdater.UpdateProgress((((float)progress / indexfiles.Count) * 0.5f) + 0.2f);

                                    using (var tmpfile = sf.TempFile)
                                    {
                                        if (sf.Hash != null && sf.Size > 0)
                                        {
                                            restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Verified, sf.Size, sf.Hash, tr);
                                        }

                                        using (var svr = new IndexVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options, hashsize))
                                        {
                                            foreach (var a in svr.Volumes)
                                            {
                                                var filename = a.Filename;
                                                var volumeID = restoredb.GetRemoteVolumeID(filename);

                                                // No such file
                                                if (volumeID < 0)
                                                {
                                                    volumeID = ProbeForMatchingFilename(ref filename, restoredb);
                                                }

                                                // Still broken, register a missing item
                                                if (volumeID < 0)
                                                {
                                                    var p = VolumeBase.ParseFilename(filename);
                                                    if (p == null)
                                                    {
                                                        throw new Exception(string.Format("Unable to parse filename: {0}", filename));
                                                    }
                                                    Logging.Log.WriteErrorMessage(LOGTAG, "MissingFileDetected", null, "Remote file referenced as {0}, but not found in list, registering a missing remote file", filename);
                                                    volumeID = restoredb.RegisterRemoteVolume(filename, p.FileType, RemoteVolumeState.Verified, tr);
                                                }

                                                //Add all block/volume mappings
                                                foreach (var b in a.Blocks)
                                                {
                                                    restoredb.UpdateBlock(b.Key, b.Value, volumeID, tr);
                                                }

                                                restoredb.UpdateRemoteVolume(filename, RemoteVolumeState.Verified, a.Length, a.Hash, tr);
                                                restoredb.AddIndexBlockLink(restoredb.GetRemoteVolumeID(sf.Name), volumeID, tr);
                                            }

                                            //If there are blocklists in the index file, update the blocklists
                                            foreach (var b in svr.BlockLists)
                                            {
                                                restoredb.UpdateBlockset(b.Hash, b.Blocklist, tr);
                                            }
                                        }
                                    }
                                }
                                catch (Exception ex)
                                {
                                    //Not fatal
                                    Logging.Log.WriteErrorMessage(LOGTAG, "IndexFileProcessingFailed", ex, "Failed to process index file: {0}", sf.Name);
                                    if (ex is System.Threading.ThreadAbortException)
                                    {
                                        m_result.EndTime = DateTime.UtcNow;
                                        throw;
                                    }
                                }
                            }

                            using (new Logging.Timer(LOGTAG, "CommitRecreateDb", "CommitRecreatedDb"))
                                tr.Commit();

                            // TODO: In some cases, we can avoid downloading all index files,
                            // if we are lucky and pick the right ones
                        }

                        // We have now grabbed as much information as possible,
                        // if we are still missing data, we must now fetch block files
                        restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, null);

                        //We do this in three passes
                        for (var i = 0; i < 3; i++)
                        {
                            // Grab the list matching the pass type
                            var lst = restoredb.GetMissingBlockListVolumes(i, m_options.Blocksize, hashsize).ToList();
                            if (lst.Count > 0)
                            {
                                var fullist = ": " + string.Join(", ", lst.Select(x => x.Name));
                                switch (i)
                                {
                                case 0:
                                    Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingRequiredBlocklistVolumes", "Processing required {0} blocklist volumes{1}", lst.Count, fullist);
                                    Logging.Log.WriteInformationMessage(LOGTAG, "ProcessingRequiredBlocklistVolumes", "Processing required {0} blocklist volumes{1}", lst.Count, m_options.FullResult ? fullist : string.Empty);
                                    break;

                                case 1:
                                    Logging.Log.WriteVerboseMessage(LOGTAG, "ProbingCandicateBlocklistVolumes", "Probing {0} candidate blocklist volumes{1}", lst.Count, fullist);
                                    Logging.Log.WriteInformationMessage(LOGTAG, "ProbingCandicateBlocklistVolumes", "Probing {0} candidate blocklist volumes{1}", lst.Count, m_options.FullResult ? fullist : string.Empty);
                                    break;

                                default:
                                    Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingAllBlocklistVolumes", "Processing all of the {0} volumes for blocklists{1}", lst.Count, fullist);
                                    Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingAllBlocklistVolumes", "Processing all of the {0} volumes for blocklists{1}", lst.Count, m_options.FullResult ? fullist : string.Empty);
                                    break;
                                }
                            }

                            var progress = 0;
                            foreach (var sf in new AsyncDownloader(lst, backend))
                            {
                                using (var tmpfile = sf.TempFile)
                                    using (var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options))
                                        using (var tr = restoredb.BeginTransaction())
                                        {
                                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                            {
                                                backend.WaitForComplete(restoredb, null);
                                                m_result.EndTime = DateTime.UtcNow;
                                                return;
                                            }

                                            progress++;
                                            m_result.OperationProgressUpdater.UpdateProgress((((float)progress / lst.Count) * 0.1f) + 0.7f + (i * 0.1f));

                                            var volumeid = restoredb.GetRemoteVolumeID(sf.Name);

                                            restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Uploaded, sf.Size, sf.Hash, tr);

                                            // Update the block table so we know about the block/volume map
                                            foreach (var h in rd.Blocks)
                                            {
                                                restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr);
                                            }

                                            // Grab all known blocklists from the volume
                                            foreach (var blocklisthash in restoredb.GetBlockLists(volumeid))
                                            {
                                                restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr);
                                            }

                                            // Update tables so we know if we are done
                                            restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, tr);

                                            using (new Logging.Timer(LOGTAG, "CommitRestoredBlocklist", "CommitRestoredBlocklist"))
                                                tr.Commit();

                                            //At this point we can patch files with data from the block volume
                                            if (blockprocessor != null)
                                            {
                                                blockprocessor(sf.Name, rd);
                                            }
                                        }
                            }
                        }
                    }

                    backend.WaitForComplete(restoredb, null);

                    if (m_options.RepairOnlyPaths)
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "RecreateOrUpdateOnly", "Recreate/path-update completed, not running consistency checks");
                    }
                    else
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "RecreateCompletedCheckingDatabase", "Recreate completed, verifying the database consistency");

                        //All done, we must verify that we have all blocklist fully intact
                        // if this fails, the db will not be deleted, so it can be used,
                        // except to continue a backup
                        m_result.EndTime = DateTime.UtcNow;

                        using (var lbfdb = new LocalListBrokenFilesDatabase(restoredb))
                        {
                            var broken = lbfdb.GetBrokenFilesets(new DateTime(0), null, null).Count();
                            if (broken != 0)
                            {
                                throw new UserInformationException(string.Format("Recreated database has missing blocks and {0} broken filelists. Consider using \"{1}\" and \"{2}\" to purge broken data from the remote store and the database.", broken, "list-broken-files", "purge-broken-files"), "DatabaseIsBrokenConsiderPurge");
                            }
                        }

                        restoredb.VerifyConsistency(m_options.Blocksize, m_options.BlockhashSize, true, null);

                        Logging.Log.WriteInformationMessage(LOGTAG, "RecreateCompleted", "Recreate completed, and consistency checks completed, marking database as complete");

                        restoredb.RepairInProgress = false;
                    }

                    m_result.EndTime = DateTime.UtcNow;
                }
        }
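
The expectedblocks and expectedblocklisthashes values computed in the filelist loop above are ceiling divisions: blocks needed for the file, then blocks needed to store those block hashes. A standalone check with illustrative numbers:

    long blocksize = 102400;                             // hypothetical 100 KiB block size
    long blockhashSize = 32;                             // hypothetical 32-byte block hash
    long hashesPerBlock = blocksize / blockhashSize;     // 3200 hashes per blocklist block

    long fileSize = 250000;
    long expectedBlocks = (fileSize + blocksize - 1) / blocksize;                          // ceil(250000/102400) = 3
    long expectedBlocklistHashes = (expectedBlocks + hashesPerBlock - 1) / hashesPerBlock; // ceil(3/3200) = 1
    if (expectedBlocks <= 1)
        expectedBlocklistHashes = 0;                     // single-block files need no blocklist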
Example #16
        public void DoRun(Database.LocalDeleteDatabase db, ref System.Data.IDbTransaction transaction, bool hasVerifiedBacked, bool forceCompact, BackendManager sharedManager)
        {
            // Workaround: allow an already-running BackendManager to be reused
            using (var bk = sharedManager == null ? new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db) : null)
            {
                var backend = bk ?? sharedManager;

                if (!hasVerifiedBacked && !m_options.NoBackendverification)
                {
                    var backupDatabase = new LocalBackupDatabase(db, m_options);
                    var latestFilelist = backupDatabase.GetTemporaryFilelistVolumeNames(latestOnly: true, transaction: transaction);
                    FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter, latestFilelist);
                }

                IListResultFileset[]      filesets         = db.FilesetsWithBackupVersion.ToArray();
                List <IListResultFileset> versionsToDelete = new List <IListResultFileset>();
                versionsToDelete.AddRange(new SpecificVersionsRemover(this.m_options).GetFilesetsToDelete(filesets));
                versionsToDelete.AddRange(new KeepTimeRemover(this.m_options).GetFilesetsToDelete(filesets));
                versionsToDelete.AddRange(new RetentionPolicyRemover(this.m_options).GetFilesetsToDelete(filesets));

                // When determining the number of full versions to keep, we need to ignore the versions already marked for removal.
                versionsToDelete.AddRange(new KeepVersionsRemover(this.m_options).GetFilesetsToDelete(filesets.Except(versionsToDelete)));

                if (!m_options.AllowFullRemoval && filesets.Length == versionsToDelete.Count)
                {
                    Logging.Log.WriteInformationMessage(LOGTAG, "PreventingLastFilesetRemoval", "Preventing removal of last fileset, use --{0} to allow removal ...", "allow-full-removal");
                    versionsToDelete = versionsToDelete.OrderBy(x => x.Version).Skip(1).ToList();
                }

                if (versionsToDelete.Count > 0)
                {
                    Logging.Log.WriteInformationMessage(LOGTAG, "DeleteRemoteFileset", "Deleting {0} remote fileset(s) ...", versionsToDelete.Count);
                }

                var lst = db.DropFilesetsFromTable(versionsToDelete.Select(x => x.Time).ToArray(), transaction).ToArray();
                foreach (var f in lst)
                {
                    db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, transaction);
                }

                if (!m_options.Dryrun)
                {
                    transaction.Commit();
                    transaction = db.BeginTransaction();
                }

                foreach (var f in lst)
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(db, transaction);
                        return;
                    }

                    if (!m_options.Dryrun)
                    {
                        backend.Delete(f.Key, f.Value);
                    }
                    else
                    {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteRemoteFileset", "Would delete remote fileset: {0}", f.Key);
                    }
                }

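                // A locally created manager must be fully drained and shut down, while a
                // shared manager is only drained so the caller can keep using it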
                if (sharedManager == null)
                {
                    backend.WaitForComplete(db, transaction);
                }
                else
                {
                    backend.WaitForEmpty(db, transaction);
                }

                var count = lst.Length;
                if (!m_options.Dryrun)
                {
                    if (count == 0)
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "DeleteResults", "No remote filesets were deleted");
                    }
                    else
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "DeleteResults", "Deleted {0} remote fileset(s)", count);
                    }
                }
                else
                {
                    if (count == 0)
                    {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteResults", "No remote filesets would be deleted");
                    }
                    else
                    {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteResults", "{0} remote fileset(s) would be deleted", count);
                    }

                    if (count > 0)
                    {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteHelp", "Remove --dry-run to actually delete files");
                    }
                }

                if (!m_options.NoAutoCompact && (forceCompact || versionsToDelete.Count > 0))
                {
                    m_result.CompactResults = new CompactResults(m_result);
                    new CompactHandler(m_backendurl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, ref transaction, sharedManager);
                }

                m_result.SetResults(versionsToDelete.Select(v => new Tuple<long, DateTime>(v.Version, v.Time)), m_options.Dryrun);
            }
        }
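
A hedged caller sketch (not part of the original example; the surrounding Run method and the LocalDeleteDatabase constructor arguments are assumptions): DoRun is driven with its own transaction and no shared BackendManager, so the handler creates, drains, and disposes one itself.

        public void Run()
        {
            using (var db = new Database.LocalDeleteDatabase(m_options.Dbpath, "Delete"))
            {
                System.Data.IDbTransaction tr = db.BeginTransaction();
                try
                {
                    // Passing hasVerifiedBackend: false forces a remote verification pass first
                    DoRun(db, ref tr, hasVerifiedBackend: false, forceCompact: false, sharedManager: null);
                    tr.Commit();
                    tr = null;
                }
                finally
                {
                    // DoRun may have swapped in a fresh transaction via the ref parameter
                    if (tr != null)
                        tr.Rollback();
                }
            }
        }

Example #17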
        public void Run(IEnumerable<string> filterstrings = null, Library.Utility.IFilter compositefilter = null)
        {
            if (string.IsNullOrEmpty(m_options.Restorepath))
                throw new Exception("Cannot restore control files without --restore-path");
            if (!System.IO.Directory.Exists(m_options.Restorepath))
                System.IO.Directory.CreateDirectory(m_options.Restorepath);
        
            using (var tmpdb = new Library.Utility.TempFile())
            using (var db = new Database.LocalDatabase(System.IO.File.Exists(m_options.Dbpath) ? m_options.Dbpath : (string)tmpdb, "RestoreControlFiles"))
            using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
            {
                m_result.SetDatabase(db);
                
                var filter = Library.Utility.JoinedFilterExpression.Join(new Library.Utility.FilterExpression(filterstrings), compositefilter);
                
                try
                {
                    var filteredList = ListFilesHandler.ParseAndFilterFilesets(backend.List(), m_options);
                    if (filteredList.Count == 0)
                        throw new Exception("No filesets found on remote target");

                    Exception lastEx = new Exception("No suitable files found on remote target");

                    foreach (var fileversion in filteredList)
                        try
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(db, null);
                                return;
                            }

                            var file = fileversion.Value.File;
                            long size;
                            string hash;
                            RemoteVolumeType type;
                            RemoteVolumeState state;
                            if (!db.GetRemoteVolume(file.Name, out hash, out size, out type, out state))
                                size = file.Size;

                            var res = new List<string>();
                            using (var tmpfile = backend.Get(file.Name, size, hash))
                            using (var tmp = new Volumes.FilesetVolumeReader(RestoreHandler.GetCompressionModule(file.Name), tmpfile, m_options))
                                foreach (var cf in tmp.ControlFiles)
                                    if (Library.Utility.FilterExpression.Matches(filter, cf.Key))
                                    {
                                        var targetpath = System.IO.Path.Combine(m_options.Restorepath, cf.Key);
                                        using (var ts = System.IO.File.Create(targetpath))
                                            Library.Utility.Utility.CopyStream(cf.Value, ts);
                                        res.Add(targetpath);
                                    }

                            m_result.SetResult(res);

                            lastEx = null;
                            break;
                        }
                        catch (Exception ex)
                        {
                            lastEx = ex;
                            if (ex is System.Threading.ThreadAbortException)
                                throw;
                        }

                    if (lastEx != null)
                        throw lastEx;
                }
                finally
                {
                    backend.WaitForComplete(db, null);
                }

                db.WriteResults();
            }
        }
Example #18
        /// <summary>
        /// Helper method that verifies uploaded volumes and updates their state in the database.
        /// Throws an error if there are issues with the remote storage
        /// </summary>
        /// <param name="backend">The backend instance to use</param>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        /// <param name="log">The log instance to use</param>
        /// <param name="protectedFiles">Filenames that should be exempted from deletion</param>
        public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log, IEnumerable<string> protectedFiles)
        {
            var rawlist = backend.List();
            var lookup = new Dictionary<string, Volumes.IParsedVolume>();

            protectedFiles = protectedFiles ?? Enumerable.Empty<string>();

            var remotelist = (from n in rawlist
                              let p = Volumes.VolumeBase.ParseFilename(n)
                              where p != null && p.Prefix == options.Prefix
                              select p).ToList();

            var otherlist = (from n in rawlist
                             let p = Volumes.VolumeBase.ParseFilename(n)
                             where p != null && p.Prefix != options.Prefix
                             select p).ToList();

            var unknownlist = (from n in rawlist
                               let p = Volumes.VolumeBase.ParseFilename(n)
                               where p == null
                               select n).ToList();

            var filesets = (from n in remotelist
                            where n.FileType == RemoteVolumeType.Files
                            orderby n.Time descending
                            select n).ToList();

            log.KnownFileCount = remotelist.Count;
            long knownFileSize = remotelist.Select(x => Math.Max(0, x.File.Size)).Sum();

            log.KnownFileSize    = knownFileSize;
            log.UnknownFileCount = unknownlist.Count;
            log.UnknownFileSize  = unknownlist.Select(x => Math.Max(0, x.Size)).Sum();
            log.BackupListCount  = database.FilesetTimes.Count();
            log.LastBackupDate   = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();

            // TODO: We should query through the backendmanager
            using (var bk = DynamicLoader.BackendLoader.GetBackend(backend.BackendUrl, options.RawOptions))
                if (bk is IQuotaEnabledBackend enabledBackend)
                {
                    Library.Interface.IQuotaInfo quota = enabledBackend.Quota;
                    if (quota != null)
                    {
                        log.TotalQuotaSpace = quota.TotalQuotaSpace;
                        log.FreeQuotaSpace  = quota.FreeQuotaSpace;

                        // Check to see if there should be a warning or error about the quota
                        // Since this processor may be called multiple times during a backup
                        // (both at the start and end, for example), the log keeps track of
                        // whether a quota error or warning has been sent already.
                        // Note that an error can still be sent later even if a warning was sent earlier.
                        if (!log.ReportedQuotaError && quota.FreeQuotaSpace == 0)
                        {
                            log.ReportedQuotaError = true;
                            Logging.Log.WriteErrorMessage(LOGTAG, "BackendQuotaExceeded", null, "Backend quota has been exceeded: Using {0} of {1} ({2} available)", Library.Utility.Utility.FormatSizeString(knownFileSize), Library.Utility.Utility.FormatSizeString(quota.TotalQuotaSpace), Library.Utility.Utility.FormatSizeString(quota.FreeQuotaSpace));
                        }
                        else if (!log.ReportedQuotaWarning && !log.ReportedQuotaError && quota.FreeQuotaSpace >= 0) // Negative value means the backend didn't return the quota info
                        {
                            // Warnings are sent if the available free space is less than the given percentage of the total backup size.
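                            // Example: with quota-warning-threshold=10 and 100 GiB of known backup
                            // data, a warning fires once free space drops below 10 GiB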
                            double warningThreshold = options.QuotaWarningThreshold / (double)100;
                            if (quota.FreeQuotaSpace < warningThreshold * knownFileSize)
                            {
                                log.ReportedQuotaWarning = true;
                                Logging.Log.WriteWarningMessage(LOGTAG, "BackendQuotaNear", null, "Backend quota is close to being exceeded: Using {0} of {1} ({2} available)", Library.Utility.Utility.FormatSizeString(knownFileSize), Library.Utility.Utility.FormatSizeString(quota.TotalQuotaSpace), Library.Utility.Utility.FormatSizeString(quota.FreeQuotaSpace));
                            }
                        }
                    }
                }

            log.AssignedQuotaSpace = options.QuotaSize;

            foreach (var s in remotelist)
            {
                lookup[s.File.Name] = s;
            }

            var missing = new List<RemoteVolumeEntry>();
            var missingHash = new List<Tuple<long, RemoteVolumeEntry>>();
            var cleanupRemovedRemoteVolumes = new HashSet<string>();

            foreach (var e in database.DuplicateRemoteVolumes())
            {
                if (e.Value == RemoteVolumeState.Uploading || e.Value == RemoteVolumeState.Temporary)
                {
                    database.UnlinkRemoteVolume(e.Key, e.Value);
                }
                else
                {
                    throw new Exception(string.Format("The remote volume {0} appears in the database with state {1} and a deleted state, cannot continue", e.Key, e.Value.ToString()));
                }
            }

            var locallist = database.GetRemoteVolumes();

            foreach (var i in locallist)
            {
                Volumes.IParsedVolume r;
                var remoteFound = lookup.TryGetValue(i.Name, out r);
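                // A negative remote size means the backend did not report one; it does not disqualify the match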
                var correctSize = remoteFound && i.Size >= 0 && (i.Size == r.File.Size || r.File.Size < 0);

                lookup.Remove(i.Name);

                switch (i.State)
                {
                case RemoteVolumeState.Deleted:
                    if (remoteFound)
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "IgnoreRemoteDeletedFile", "ignoring remote file listed as {0}: {1}", i.State, i.Name);
                    }

                    break;

                case RemoteVolumeState.Temporary:
                case RemoteVolumeState.Deleting:
                    if (remoteFound)
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "RemoveUnwantedRemoteFile", "removing remote file listed as {0}: {1}", i.State, i.Name);
                        backend.Delete(i.Name, i.Size, true);
                    }
                    else
                    {
                        if (i.DeleteGracePeriod > DateTime.UtcNow)
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "KeepDeleteRequest", "keeping delete request for {0} until {1}", i.Name, i.DeleteGracePeriod.ToLocalTime());
                        }
                        else
                        {
                            if (i.State == RemoteVolumeState.Temporary && protectedFiles.Any(pf => pf == i.Name))
                            {
                                Logging.Log.WriteInformationMessage(LOGTAG, "KeepIncompleteFile", "keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name);
                            }
                            else
                            {
                                Logging.Log.WriteInformationMessage(LOGTAG, "RemoteUnwantedMissingFile", "removing file listed as {0}: {1}", i.State, i.Name);
                                cleanupRemovedRemoteVolumes.Add(i.Name);
                            }
                        }
                    }
                    break;

                case RemoteVolumeState.Uploading:
                    if (remoteFound && correctSize && r.File.Size >= 0)
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "PromotingCompleteFile", "promoting uploaded complete file from {0} to {2}: {1}", i.State, i.Name, RemoteVolumeState.Uploaded);
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Uploaded, i.Size, i.Hash);
                    }
                    else if (!remoteFound)
                    {
                        if (protectedFiles.Any(pf => pf == i.Name))
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "KeepIncompleteFile", "keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name);
                            database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Temporary, i.Size, i.Hash, false, new TimeSpan(0), null);
                        }
                        else
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "SchedulingMissingFileForDelete", "scheduling missing file for deletion, currently listed as {0}: {1}", i.State, i.Name);
                            cleanupRemovedRemoteVolumes.Add(i.Name);
                            database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Deleting, i.Size, i.Hash, false, TimeSpan.FromHours(2), null);
                        }
                    }
                    else
                    {
                        if (protectedFiles.Any(pf => pf == i.Name))
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "KeepIncompleteFile", "keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name);
                        }
                        else
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "RemoveIncompleteFile", "removing incomplete remote file listed as {0}: {1}", i.State, i.Name);
                            backend.Delete(i.Name, i.Size, true);
                        }
                    }
                    break;

                case RemoteVolumeState.Uploaded:
                    if (!remoteFound)
                    {
                        missing.Add(i);
                    }
                    else if (correctSize)
                    {
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash);
                    }
                    else
                    {
                        missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));
                    }

                    break;

                case RemoteVolumeState.Verified:
                    if (!remoteFound)
                    {
                        missing.Add(i);
                    }
                    else if (!correctSize)
                    {
                        missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));
                    }

                    break;

                default:
                    Logging.Log.WriteWarningMessage(LOGTAG, "UnknownFileState", null, "unknown state for remote file listed as {0}: {1}", i.State, i.Name);
                    break;
                }

                backend.FlushDbMessages();
            }

            // Clean up deleted volumes from the DB en bloc
            database.RemoveRemoteVolumes(cleanupRemovedRemoteVolumes, null);

            foreach (var i in missingHash)
            {
                Logging.Log.WriteWarningMessage(LOGTAG, "MissingRemoteHash", null, "remote file {1} is listed as {0} with size {2} but should be {3}, please verify the sha256 hash \"{4}\"", i.Item2.State, i.Item2.Name, i.Item1, i.Item2.Size, i.Item2.Hash);
            }

            return new RemoteAnalysisResult()
            {
                ParsedVolumes = remotelist,
                OtherVolumes = otherlist,
                ExtraVolumes = lookup.Values,
                MissingVolumes = missing,
                VerificationRequiredVolumes = missingHash.Select(x => x.Item2)
            };
        }
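
A minimal consumption sketch (assumed, not from the original page; backend, options, database, and log come from the surrounding handler): run the analysis and fail fast when volumes recorded as uploaded or verified are missing from the backend.

        var analysis = RemoteListAnalysis(backend, options, database, log, protectedFiles: null);
        if (analysis.MissingVolumes.Any())
            throw new Exception(string.Format("Found {0} missing remote volume(s)", analysis.MissingVolumes.Count()));
        foreach (var v in analysis.VerificationRequiredVolumes)
            log.AddMessage(string.Format("Volume {0} requires hash verification", v.Name));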
Example #19
        public override string ToString()
        {
            var s = BackendManager.GetServer(ServerName);

            return string.Format("[{0} {1} {2} players]", ServerName, s == null ? "unloaded" : s.ToString(), Players.Length);
        }
Example #20
        public void Setup()
        {
            backendMocks = new List<Mock<IBackend>> ();
            createFunc = () => {
                var mock = new Mock<IBackend> ();
                backendMocks.Add (mock);
                return mock.Object;
            };
            backendInfo1Mock = new Mock<BackendInfo> ("My backend1", "descr", createFunc);
            backendInfo2Mock = new Mock<BackendInfo> ("My backend2", "descr", createFunc);
            var backendInfos = new Collection<BackendInfo> { backendInfo1Mock.Object, backendInfo2Mock.Object };

            var prefsMock = new Mock<IPreferences> ();
            bm = new BackendManager (backendInfos, prefsMock.Object);
        }
Example #21
        /// <summary>
        /// Run the recreate procedure
        /// </summary>
        /// <param name="dbparent">The database to restore into</param>
        /// <param name="updating">True if this is an update call, false otherwise</param>
        /// <param name="filter">A filter applied to the file entries inside each filelist, preventing unwanted data from being downloaded</param>
        /// <param name="filelistfilter">A filter used to select which of the remote filelists to process</param>
        /// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be use to recover data blocks while processing blocklists</param>
        internal void DoRun(LocalDatabase dbparent, bool updating, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Recreate_Running);

            //We build a local database in steps.
            using(var restoredb = new LocalRecreateDatabase(dbparent, m_options))
            using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, restoredb))
            {
                restoredb.RepairInProgress = true;

                var volumeIds = new Dictionary<string, long>();

                var rawlist = backend.List();
		
                //First step is to examine the remote storage to see what
                // kind of data we can find
                var remotefiles =
                    (from x in rawlist
                     let n = VolumeBase.ParseFilename(x)
                     where n != null && n.Prefix == m_options.Prefix
                     select n).ToArray(); //ToArray() ensures we do not issue the remote request multiple times

                if (remotefiles.Length == 0)
                {
                    if (rawlist.Count == 0)
                        throw new Exception("No files were found at the remote location, perhaps the target url is incorrect?");
                    else
                    {
                        var tmp =
                            (from x in rawlist
                             let n = VolumeBase.ParseFilename(x)
                             where n != null
                             select n.Prefix).ToArray();
                
                        var types = tmp.Distinct().ToArray();
                        if (tmp.Length == 0)
                            throw new Exception(string.Format("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count));
                        else if (types.Length == 1)
                            throw new Exception(string.Format("Found {0} parse-able files with the prefix {1}, did you forget to set the backup-prefix?", tmp.Length, types[0]));
                        else
                            throw new Exception(string.Format("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup-prefix?", tmp.Length, rawlist.Count, string.Join(", ", types)));
                    }
                }

                //Then we select the filelist we should work with,
                // and create the filelist table to fit
                IEnumerable<IParsedVolume> filelists =
                    from n in remotefiles
                    where n.FileType == RemoteVolumeType.Files
                    orderby n.Time descending
                    select n;

                if (filelists.Count() <= 0)
                    throw new Exception("No filelists found on the remote destination");
                
                if (filelistfilter != null)
                    filelists = filelistfilter(filelists).Select(x => x.Value).ToArray();

                if (filelists.Count() <= 0)
                    throw new Exception("No filelists remained after filtering");

                // If we are updating, all files should be accounted for
                foreach(var fl in remotefiles)
                    volumeIds[fl.File.Name] = updating ? restoredb.GetRemoteVolumeID(fl.File.Name) : restoredb.RegisterRemoteVolume(fl.File.Name, fl.FileType, fl.File.Size, RemoteVolumeState.Uploaded);

                var hasUpdatedOptions = false;

                if (updating)
                {
                    Utility.UpdateOptionsFromDb(restoredb, m_options);
                    Utility.VerifyParameters(restoredb, m_options);
                }

                //Record all blocksets and files needed
                using(var tr = restoredb.BeginTransaction())
                {
                    var filelistWork = (from n in filelists orderby n.Time select new RemoteVolume(n.File) as IRemoteVolume).ToList();
                    m_result.AddMessage(string.Format("Rebuild database started, downloading {0} filelists", filelistWork.Count));

                    var progress = 0;

                    // Register the files we are working with, if not already updated
                    if (updating)
                    {
                        foreach(var n in filelists)
                            if (volumeIds[n.File.Name] == -1)
                                volumeIds[n.File.Name] = restoredb.RegisterRemoteVolume(n.File.Name, n.FileType, RemoteVolumeState.Uploaded, n.File.Size, new TimeSpan(0), tr);
                    }
                                
                    var isFirstFilelist = true;
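                    // A blocklist is itself stored as one block of hashes, so a single
                    // blocklist block can reference blocksize / hash-size data blocks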
                    var blocksize = m_options.Blocksize;
                    var hashes_pr_block = blocksize / m_options.BlockhashSize;

                    foreach(var entry in new AsyncDownloader(filelistWork, backend))
                        try
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(restoredb, null);
                                return;
                            }    
                        
                            progress++;
                            if (filelistWork.Count == 1 && m_options.RepairOnlyPaths)
                                m_result.OperationProgressUpdater.UpdateProgress(0.5f);
                            else
                                m_result.OperationProgressUpdater.UpdateProgress(((float)progress / filelistWork.Count()) * (m_options.RepairOnlyPaths ? 1f : 0.2f));

                            using(var tmpfile = entry.TempFile)
                            {
                                isFirstFilelist = false;

                                if (entry.Hash != null && entry.Size > 0)
                                    restoredb.UpdateRemoteVolume(entry.Name, RemoteVolumeState.Verified, entry.Size, entry.Hash, tr);

                                var parsed = VolumeBase.ParseFilename(entry.Name);

                                if (!hasUpdatedOptions && !updating) 
                                {
                                    VolumeReaderBase.UpdateOptionsFromManifest(parsed.CompressionModule, tmpfile, m_options);
                                    hasUpdatedOptions = true;
                                    // Recompute the cached sizes
                                    blocksize = m_options.Blocksize;
                                    hashes_pr_block = blocksize / m_options.BlockhashSize;
                                }


                                // Create timestamped operations based on the file timestamp
                                var filesetid = restoredb.CreateFileset(volumeIds[entry.Name], parsed.Time, tr);
                                using(var filelistreader = new FilesetVolumeReader(parsed.CompressionModule, tmpfile, m_options))
                                    foreach(var fe in filelistreader.Files.Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path)))
                                    {
                                        try
                                        {
                                            if (fe.Type == FilelistEntryType.Folder)
                                            {
                                                restoredb.AddDirectoryEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else if (fe.Type == FilelistEntryType.File)
                                            {
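                                                // Ceiling divisions: the number of data blocks in the
                                                // file, and the number of blocklist hashes needed to
                                                // index them (none if the file fits in a single block)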
                                                var expectedblocks = (fe.Size + blocksize - 1)  / blocksize;
                                                var expectedblocklisthashes = (expectedblocks + hashes_pr_block - 1) / hashes_pr_block;
                                                if (expectedblocks <= 1) expectedblocklisthashes = 0;

                                                var blocksetid = restoredb.AddBlockset(fe.Hash, fe.Size, fe.BlocklistHashes, expectedblocklisthashes, tr);
                                                restoredb.AddFileEntry(filesetid, fe.Path, fe.Time, blocksetid, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else if (fe.Type == FilelistEntryType.Symlink)
                                            {
                                                restoredb.AddSymlinkEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else
                                            {
                                                m_result.AddWarning(string.Format("Skipping file-entry with unknown type {0}: {1} ", fe.Type, fe.Path), null);
                                            }
                                        }
                                        catch (Exception ex)
                                        {
                                            m_result.AddWarning(string.Format("Failed to process file-entry: {0}", fe.Path), ex);
                                        }
                                    }
                            }
                        }
                        catch (Exception ex)
                        {
                            m_result.AddWarning(string.Format("Failed to process file: {0}", entry.Name), ex);
                            if (ex is System.Threading.ThreadAbortException)
                                throw;

                            if (isFirstFilelist && ex is System.Security.Cryptography.CryptographicException)
                                throw;
                        }

                    //Make sure we write the config
                    if (!updating)
                        Utility.VerifyParameters(restoredb, m_options, tr);

                    using(new Logging.Timer("CommitUpdateFilesetFromRemote"))
                        tr.Commit();
                }
            
                if (!m_options.RepairOnlyPaths)
                {
                    var hashalg = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
                    if (hashalg == null)
                        throw new Exception(Strings.Common.InvalidHashAlgorithm(m_options.BlockHashAlgorithm));
                    var hashsize = hashalg.HashSize / 8;

                    //Grab all index files, and update the block table
                    using(var tr = restoredb.BeginTransaction())
                    {
                        var indexfiles = (from n in remotefiles
                                          where n.FileType == RemoteVolumeType.Index
                                          select new RemoteVolume(n.File) as IRemoteVolume).ToList();

                        m_result.AddMessage(string.Format("Filelists restored, downloading {0} index files", indexfiles.Count));

                        var progress = 0;
                                    
                        foreach(var sf in new AsyncDownloader(indexfiles, backend))
                            try
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(restoredb, null);
                                    return;
                                }

                                progress++;
                                m_result.OperationProgressUpdater.UpdateProgress((((float)progress / indexfiles.Count) * 0.5f) + 0.2f);

                                using(var tmpfile = sf.TempFile)
                                {
                                    if (sf.Hash != null && sf.Size > 0)
                                        restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Verified, sf.Size, sf.Hash, tr);
                
                                    using(var svr = new IndexVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options, hashsize))
                                    {
                                        foreach(var a in svr.Volumes)
                                        {
                                            var filename = a.Filename;
                                            var volumeID = restoredb.GetRemoteVolumeID(filename);

                                            // No such file
                                            if (volumeID < 0)
                                                volumeID = ProbeForMatchingFilename(ref filename, restoredb);

                                            // Still broken, register a missing item
                                            if (volumeID < 0)
                                            {
                                                var p = VolumeBase.ParseFilename(filename);
                                                if (p == null)
                                                    throw new Exception(string.Format("Unable to parse filename: {0}", filename));
                                                m_result.AddError(string.Format("Remote file referenced as {0}, but not found in list, registering a missing remote file", filename), null);
                                                volumeID = restoredb.RegisterRemoteVolume(filename, p.FileType, RemoteVolumeState.Verified, tr);
                                            }
                                            
                                            //Add all block/volume mappings
                                            foreach(var b in a.Blocks)
                                                restoredb.UpdateBlock(b.Key, b.Value, volumeID, tr);

                                            restoredb.UpdateRemoteVolume(filename, RemoteVolumeState.Verified, a.Length, a.Hash, tr);
                                            restoredb.AddIndexBlockLink(restoredb.GetRemoteVolumeID(sf.Name), volumeID, tr);
                                        }
                                
                                        //If there are blocklists in the index file, update the blocklists
                                        foreach(var b in svr.BlockLists)
                                            restoredb.UpdateBlockset(b.Hash, b.Blocklist, tr);
                                    }
                                }
                            }
                            catch (Exception ex)
                            {
                                //Not fatal
                                m_result.AddWarning(string.Format("Failed to process index file: {0}", sf.Name), ex);
                                if (ex is System.Threading.ThreadAbortException)
                                    throw;
                            }

                        using(new Logging.Timer("CommitRecreatedDb"))
                            tr.Commit();
                    
                        // TODO: In some cases, we can avoid downloading all index files, 
                        // if we are lucky and pick the right ones
                    }

                    // We have now grabbed as much information as possible,
                    // if we are still missing data, we must now fetch block files
                    restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, null);
                
                    // We do this in three passes: first the volumes known to contain missing
                    // blocklists, then likely candidates, and finally all remaining block volumes
                    for(var i = 0; i < 3; i++)
                    {
                        // Grab the list matching the pass type
                        var lst = restoredb.GetMissingBlockListVolumes(i, m_options.Blocksize, hashsize).ToList();
                        if (lst.Count > 0)
                        {
                            switch (i)
                            {
                                case 0:
                                    if (m_options.Verbose)
                                        m_result.AddVerboseMessage("Processing required {0} blocklist volumes: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                    else
                                        m_result.AddMessage(string.Format("Processing required {0} blocklist volumes", lst.Count));
                                    break;
                                case 1:
                                    if (m_options.Verbose)
                                        m_result.AddVerboseMessage("Probing {0} candidate blocklist volumes: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                    else
                                        m_result.AddMessage(string.Format("Probing {0} candidate blocklist volumes", lst.Count));
                                    break;
                                default:
                                    if (m_options.Verbose)
                                        m_result.AddVerboseMessage("Processing all of the {0} volumes for blocklists: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                    else
                                        m_result.AddMessage(string.Format("Processing all of the {0} volumes for blocklists", lst.Count));
                                    break;
                            }
                        }

                        var progress = 0;
                        foreach(var sf in new AsyncDownloader(lst, backend))
                            using(var tmpfile = sf.TempFile)
                            using(var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options))
                            using(var tr = restoredb.BeginTransaction())
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(restoredb, null);
                                    return;
                                }    
                            
                                progress++;
                                m_result.OperationProgressUpdater.UpdateProgress((((float)progress / lst.Count) * 0.1f) + 0.7f + (i * 0.1f));

                                var volumeid = restoredb.GetRemoteVolumeID(sf.Name);

                                restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Uploaded, sf.Size, sf.Hash, tr);
                            
                                // Update the block table so we know about the block/volume map
                                foreach(var h in rd.Blocks)
                                    restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr);
                            
                                // Grab all known blocklists from the volume
                                foreach(var blocklisthash in restoredb.GetBlockLists(volumeid))
                                    restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr);
    
                                // Update tables so we know if we are done
                                restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, tr);
                        
                                using(new Logging.Timer("CommitRestoredBlocklist"))
                                    tr.Commit();
    
                                //At this point we can patch files with data from the block volume
                                if (blockprocessor != null)
                                    blockprocessor(sf.Name, rd);
                            }
                    }
                }
                
                backend.WaitForComplete(restoredb, null);

                m_result.AddMessage("Recreate completed, verifying the database consistency");

                //All done, we must verify that we have all blocklist fully intact
                // if this fails, the db will not be deleted, so it can be used,
                // except to continue a backup
                restoredb.VerifyConsistency(null, m_options.Blocksize, m_options.BlockhashSize);

                m_result.AddMessage("Recreate completed, and consistency checks completed, marking database as complete");

                restoredb.RepairInProgress = false;
            }
        }
Example #22
        private bool HandleFilesystemEntry(Snapshots.ISnapshotService snapshot, BackendManager backend, string path, System.IO.FileAttributes attributes)
        {
            // If we lost the connection, there is no point in keeping on processing
            if (backend.HasDied)
                throw backend.LastException;
            
            try
            {
                m_result.OperationProgressUpdater.StartFile(path, -1);            
                
                if (m_backendLogFlushTimer < DateTime.Now)
                {
                    m_backendLogFlushTimer = DateTime.Now.Add(FLUSH_TIMESPAN);
                    backend.FlushDbMessages(m_database, null);
                }

                DateTime lastwrite = new DateTime(0, DateTimeKind.Utc);
                try 
                { 
                    lastwrite = snapshot.GetLastWriteTimeUtc(path); 
                }
                catch (Exception ex) 
                {
                    m_result.AddWarning(string.Format("Failed to read timestamp on \"{0}\"", path), ex);
                }
                                            
                if ((attributes & FileAttributes.ReparsePoint) == FileAttributes.ReparsePoint)
                {
                    if (m_options.SymlinkPolicy == Options.SymlinkStrategy.Ignore)
                    {
                        m_result.AddVerboseMessage("Ignoring symlink {0}", path);
                        return false;
                    }
    
                    if (m_options.SymlinkPolicy == Options.SymlinkStrategy.Store)
                    {
                        Dictionary<string, string> metadata = GenerateMetadata(snapshot, path, attributes);
    
                        if (!metadata.ContainsKey("CoreSymlinkTarget"))
                        {
                            var p = snapshot.GetSymlinkTarget(path);

                            if (string.IsNullOrWhiteSpace(p))
                                m_result.AddVerboseMessage("Ignoring empty symlink {0}", path);
                            else
                                metadata["CoreSymlinkTarget"] = p;
                        }
    
                        var metahash = Utility.WrapMetadata(metadata, m_options);
                        AddSymlinkToOutput(backend, path, DateTime.UtcNow, metahash);
                        
                        m_result.AddVerboseMessage("Stored symlink {0}", path);
                        //Do not recurse symlinks
                        return false;
                    }
                }
    
                if ((attributes & FileAttributes.Directory) == FileAttributes.Directory)
                {
                    IMetahash metahash;
    
                    if (m_options.StoreMetadata)
                    {
                        metahash = Utility.WrapMetadata(GenerateMetadata(snapshot, path, attributes), m_options);
                    }
                    else
                    {
                        metahash = EMPTY_METADATA;
                    }
    
                    m_result.AddVerboseMessage("Adding directory {0}", path);
                    AddFolderToOutput(backend, path, lastwrite, metahash);
                    return true;
                }
    
                m_result.OperationProgressUpdater.UpdatefilesProcessed(++m_result.ExaminedFiles, m_result.SizeOfExaminedFiles);
                
                bool changed = false;
                
                // Last scan time
                DateTime oldModified;
                long lastFileSize = -1;
                string oldMetahash;
                long oldMetasize;
                var oldId = m_database.GetFileEntry(path, out oldModified, out lastFileSize, out oldMetahash, out oldMetasize);

                long filestatsize = -1;
                try { filestatsize = snapshot.GetFileSize(path); }
                catch { }

                IMetahash metahashandsize = m_options.StoreMetadata ? Utility.WrapMetadata(GenerateMetadata(snapshot, path, attributes), m_options) : EMPTY_METADATA;

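                // A zero tick count means a timestamp could not be read, and a negative size
                // means the size is unknown; either forces a content re-check instead of a skip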
                var timestampChanged = lastwrite != oldModified || lastwrite.Ticks == 0 || oldModified.Ticks == 0;
                var filesizeChanged = filestatsize < 0 || lastFileSize < 0 || filestatsize != lastFileSize;
                var tooLargeFile = m_options.SkipFilesLargerThan != long.MaxValue && m_options.SkipFilesLargerThan != 0 && filestatsize >= 0 && filestatsize > m_options.SkipFilesLargerThan;
                var metadatachanged = !m_options.SkipMetadata && (metahashandsize.Size != oldMetasize || metahashandsize.Hash != oldMetahash);

                if ((oldId < 0 || m_options.DisableFiletimeCheck || timestampChanged || filesizeChanged || metadatachanged) && !tooLargeFile)
                {
                    m_result.AddVerboseMessage("Checking file for changes {0}, new: {1}, timestamp changed: {2}, size changed: {3}, metadatachanged: {4}, {5} vs {6}", path, oldId <= 0, timestampChanged, filesizeChanged, metadatachanged, lastwrite, oldModified);

                    m_result.OpenedFiles++;
                    
                    long filesize = 0;

                    var hint = m_options.GetCompressionHintFromFilename(path);
                    var oldHash = oldId < 0 ? null : m_database.GetFileHash(oldId);

                    using (var blocklisthashes = new Library.Utility.FileBackedStringList())
                    using (var hashcollector = new Library.Utility.FileBackedStringList())
                    {
                        using (var fs = new Blockprocessor(snapshot.OpenRead(path), m_blockbuffer))
                        {
                            try { m_result.OperationProgressUpdater.StartFile(path, fs.Length); }
                            catch (Exception ex) { m_result.AddWarning(string.Format("Failed to read file length for file {0}", path), ex); }
                            
                            int blocklistoffset = 0;

                            m_filehasher.Initialize();

                            
                            var offset = 0;
                            var remaining = fs.Readblock();
                            
                            do
                            {
                                var size = Math.Min(m_blocksize, remaining);

                                m_filehasher.TransformBlock(m_blockbuffer, offset, size, m_blockbuffer, offset);
                                var blockkey = m_blockhasher.ComputeHash(m_blockbuffer, offset, size);
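                                // When the blocklist buffer cannot hold another block hash, flush it:
                                // hash the buffer, record that blocklist hash, and emit the buffer
                                // itself as a non-compressible block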
                                if (m_blocklistbuffer.Length - blocklistoffset < blockkey.Length)
                                {
                                    var blkey = Convert.ToBase64String(m_blockhasher.ComputeHash(m_blocklistbuffer, 0, blocklistoffset));
                                    blocklisthashes.Add(blkey);
                                    AddBlockToOutput(backend, blkey, m_blocklistbuffer, 0, blocklistoffset, CompressionHint.Noncompressible, true);
                                    blocklistoffset = 0;
                                }

                                Array.Copy(blockkey, 0, m_blocklistbuffer, blocklistoffset, blockkey.Length);
                                blocklistoffset += blockkey.Length;

                                var key = Convert.ToBase64String(blockkey);
                                AddBlockToOutput(backend, key, m_blockbuffer, offset, size, hint, false);
                                hashcollector.Add(key);
                                filesize += size;

                                m_result.OperationProgressUpdater.UpdateFileProgress(filesize);
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                    return false;
                                
                                remaining -= size;
                                offset += size;
                                
                                if (remaining == 0)
                                {
                                    offset = 0;
                                    remaining = fs.Readblock();
                                }

                            } while (remaining > 0);

                            //If all fits in a single block, don't bother with blocklists
                            if (hashcollector.Count > 1)
                            {
                                var blkeyfinal = Convert.ToBase64String(m_blockhasher.ComputeHash(m_blocklistbuffer, 0, blocklistoffset));
                                blocklisthashes.Add(blkeyfinal);
                                AddBlockToOutput(backend, blkeyfinal, m_blocklistbuffer, 0, blocklistoffset, CompressionHint.Noncompressible, true);
                            }
                        }

                        m_result.SizeOfOpenedFiles += filesize;
                        m_filehasher.TransformFinalBlock(m_blockbuffer, 0, 0);

                        var filekey = Convert.ToBase64String(m_filehasher.Hash);
                        if (oldHash != filekey)
                        {
                            if (oldHash == null)
                                m_result.AddVerboseMessage("New file {0}", path);
                            else
                                m_result.AddVerboseMessage("File has changed {0}", path);
                            if (oldId < 0)
                            {
                                m_result.AddedFiles++;
                                m_result.SizeOfAddedFiles += filesize;
					            
                                if (m_options.Dryrun)
                                    m_result.AddDryrunMessage(string.Format("Would add new file {0}, size {1}", path, Library.Utility.Utility.FormatSizeString(filesize)));
                            }
                            else
                            {
                                m_result.ModifiedFiles++;
                                m_result.SizeOfModifiedFiles += filesize;
					            
                                if (m_options.Dryrun)
                                    m_result.AddDryrunMessage(string.Format("Would add changed file {0}, size {1}", path, Library.Utility.Utility.FormatSizeString(filesize)));
                            }

                            AddFileToOutput(backend, path, filesize, lastwrite, metahashandsize, hashcollector, filekey, blocklisthashes);
                            changed = true;
                        }
                        else if (metadatachanged)
                        {
                            m_result.AddVerboseMessage("File has only metadata changes {0}", path);
                            AddFileToOutput(backend, path, filesize, lastwrite, metahashandsize, hashcollector, filekey, blocklisthashes);
                            changed = true;
                        }
                        else
                        {
                            // When we write the file to output, update the last modified time
                            oldModified = lastwrite;
                            m_result.AddVerboseMessage("File has not changed {0}", path);
                        }

                    }
                }
                else
                {
                    if (m_options.SkipFilesLargerThan == long.MaxValue || m_options.SkipFilesLargerThan == 0 || snapshot.GetFileSize(path) < m_options.SkipFilesLargerThan)                
                        m_result.AddVerboseMessage("Skipped checking file, because timestamp was not updated {0}", path);
                    else
                        m_result.AddVerboseMessage("Skipped checking file, because the size exceeds limit {0}", path);
                }

                if (!changed)
                    AddUnmodifiedFile(oldId, lastwrite);

                m_result.SizeOfExaminedFiles += filestatsize;
                if (filestatsize != 0)
                    m_result.OperationProgressUpdater.UpdatefilesProcessed(m_result.ExaminedFiles, m_result.SizeOfExaminedFiles);
            }
            catch (Exception ex)
            {
                m_result.AddWarning(string.Format("Failed to process path: {0}", path), ex);
                m_result.FilesWithError++;
            }

            return true;
        }
Example #23
        public void Run(string[] sources, Library.Utility.IFilter filter)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Begin);                        
            
            using(m_database = new LocalBackupDatabase(m_options.Dbpath, m_options))
            {
                m_result.SetDatabase(m_database);
                m_result.Dryrun = m_options.Dryrun;

                Utility.UpdateOptionsFromDb(m_database, m_options);
                Utility.VerifyParameters(m_database, m_options);

                if (m_database.RepairInProgress)
                    throw new Exception("A repair of this database was attempted but did not complete. The database may be incomplete and the backup process cannot continue. You may delete the local database and attempt to repair it again.");

                m_blocksize = m_options.Blocksize;

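                // Size the read buffer as a whole number of blocks (at least one block)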
                m_blockbuffer = new byte[m_options.Blocksize * Math.Max(1, m_options.FileReadBufferSize / m_options.Blocksize)];
                m_blocklistbuffer = new byte[m_options.Blocksize];

                m_blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
                m_filehasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.FileHashAlgorithm);

                if (m_blockhasher == null)
                    throw new Exception(Strings.Common.InvalidHashAlgorithm(m_options.BlockHashAlgorithm));
                if (m_filehasher == null)
                    throw new Exception(Strings.Common.InvalidHashAlgorithm(m_options.FileHashAlgorithm));

                if (!m_blockhasher.CanReuseTransform)
                    throw new Exception(Strings.Common.InvalidCryptoSystem(m_options.BlockHashAlgorithm));
                if (!m_filehasher.CanReuseTransform)
                    throw new Exception(Strings.Common.InvalidCryptoSystem(m_options.FileHashAlgorithm));

                m_database.VerifyConsistency(null, m_options.Blocksize, m_options.BlockhashSize);
                // If there is no filter, we set an empty filter to simplify the code
                // If there is a filter, we make sure that the sources are included
                m_filter = filter ?? new Library.Utility.FilterExpression();
                m_sourceFilter = new Library.Utility.FilterExpression(sources, true);
            	
                m_backendLogFlushTimer = DateTime.Now.Add(FLUSH_TIMESPAN);
                System.Threading.Thread parallelScanner = null;

    
                try
                {

                    using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
                    using(var filesetvolume = new FilesetVolumeWriter(m_options, m_database.OperationTimestamp))
                    {
                        using(var snapshot = GetSnapshot(sources, m_options, m_result))
                        {
                            // Start parallel scan
                            if (m_options.ChangedFilelist == null || m_options.ChangedFilelist.Length < 1)
                            {
                                parallelScanner = new System.Threading.Thread(CountFilesThread) {
                                    Name = "Read ahead file counter",
                                    IsBackground = true
                                };
                                parallelScanner.Start(snapshot);
                            }

                            PreBackupVerify(backend);

                            // Verify before uploading a synthetic list
                            m_database.VerifyConsistency(null, m_options.Blocksize, m_options.BlockhashSize);
                            UploadSyntheticFilelist(backend);

                            m_database.BuildLookupTable(m_options);
                            m_transaction = m_database.BeginTransaction();

                            var repcnt = 0;
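                            // Fileset filenames embed the operation timestamp; if a remote
                            // file with the generated name already exists, retry with the
                            // timestamp shifted one second at a time, up to 100 attempts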
                            while(repcnt < 100 && m_database.GetRemoteVolumeID(filesetvolume.RemoteFilename) >= 0)
                                filesetvolume.ResetRemoteFilename(m_options, m_database.OperationTimestamp.AddSeconds(repcnt++));
        		            
                            if (m_database.GetRemoteVolumeID(filesetvolume.RemoteFilename) >= 0)
                                throw new Exception("Unable to generate a unique fileset name");

                            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_ProcessingFiles);
                            var filesetvolumeid = m_database.RegisterRemoteVolume(filesetvolume.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary, m_transaction);
                            m_database.CreateFileset(filesetvolumeid, VolumeBase.ParseFilename(filesetvolume.RemoteFilename).Time, m_transaction);
        	
                            RunMainOperation(snapshot, backend);

                            //If the scanner is still running for some reason, make sure we kill it now 
                            if (parallelScanner != null && parallelScanner.IsAlive)
                                parallelScanner.Abort();
                        }

                        var lastVolumeSize = FinalizeRemoteVolumes(backend);
    		            
                        using(new Logging.Timer("UpdateChangeStatistics"))
                            m_database.UpdateChangeStatistics(m_result);
                        using(new Logging.Timer("VerifyConsistency"))
                            m_database.VerifyConsistency(m_transaction, m_options.Blocksize, m_options.BlockhashSize);
    
                        UploadRealFileList(backend, filesetvolume);
    									
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload);
                        using(new Logging.Timer("Async backend wait"))
                            backend.WaitForComplete(m_database, m_transaction);
                            
                        if (m_result.TaskControlRendevouz() != TaskControlState.Stop) 
                            CompactIfRequired(backend, lastVolumeSize);
    		            
                        if (m_options.UploadVerificationFile)
                        {
                            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_VerificationUpload);
                            FilelistProcessor.UploadVerificationFile(backend.BackendUrl, m_options, m_result.BackendWriter, m_database, m_transaction);
                        }

                        if (m_options.Dryrun)
                        {
                            m_transaction.Rollback();
                            m_transaction = null;
                        }
                        else
                        {
                            using(new Logging.Timer("CommitFinalizingBackup"))
                                m_transaction.Commit();
                                
                            m_transaction = null;
                            m_database.Vacuum();
                            
                            if (m_result.TaskControlRendevouz() != TaskControlState.Stop && !m_options.NoBackendverification)
                            {
                                PostBackupVerification();
                            }

                        }
                        
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Complete);
                        m_database.WriteResults();                    
                        m_database.PurgeLogData(m_options.LogRetention);
                        return;
                    }
                }
                catch (Exception ex)
                {
                    m_result.AddError("Fatal error", ex);
                    throw;
                }
                finally
                {
                    if (parallelScanner != null && parallelScanner.IsAlive)
                    {
                        parallelScanner.Abort();
                        parallelScanner.Join(500);
                        if (parallelScanner.IsAlive)
                            m_result.AddWarning("Failed to terminate filecounter thread", null);
                    }
                
                    if (m_transaction != null)
                        try { m_transaction.Rollback(); }
                        catch (Exception ex) { m_result.AddError(string.Format("Rollback error: {0}", ex.Message), ex); }
                }
            }
        }
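Note: in dry-run mode the handler rolls the database transaction back instead of committing it, so the local database is left untouched; the vacuum and post-backup verification steps only run for real backups.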
Example #24
        public void DoRun(long samples, LocalTestDatabase db, BackendManager backend)
        {
            var blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);

            if (blockhasher == null)
            {
                throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.BlockHashAlgorithm));
            }
            if (!blockhasher.CanReuseTransform)
            {
                throw new Exception(string.Format(Strings.Foresthash.InvalidCryptoSystem, m_options.BlockHashAlgorithm));
            }

            var hashsize = blockhasher.HashSize / 8;
            var files    = db.SelectTestTargets(samples, m_options).ToList();

            if (m_options.FullRemoteVerification)
            {
                foreach (var vol in new AsyncDownloader(files, backend))
                {
                    var parsedInfo = Volumes.VolumeBase.ParseFilename(vol.Name);
                    try
                    {
                        if (m_results.TaskControlRendevouz() == TaskControlState.Stop)
                        {
                            backend.WaitForComplete(db, null);
                            return;
                        }

                        using (var tf = vol.TempFile)
                        {
                            if (parsedInfo.FileType == RemoteVolumeType.Files)
                            {
                                //Compare with db and see if all files are accounted for
                                // with correct file hashes and blocklist hashes
                                using (var fl = db.CreateFilelist(vol.Name))
                                {
                                    using (var rd = new Volumes.FilesetVolumeReader(parsedInfo.CompressionModule, tf, m_options))
                                        foreach (var f in rd.Files)
                                        {
                                            fl.Add(f.Path, f.Size, f.Hash, f.Metasize, f.Metahash, f.BlocklistHashes, f.Type, f.Time);
                                        }

                                    m_results.AddResult(vol.Name, fl.Compare().ToList());
                                }
                            }
                            else if (parsedInfo.FileType == RemoteVolumeType.Index)
                            {
                                var blocklinks = new List <Tuple <string, string, long> >();
                                IEnumerable <KeyValuePair <Duplicati.Library.Interface.TestEntryStatus, string> > combined = new KeyValuePair <Duplicati.Library.Interface.TestEntryStatus, string> [0];

                                //Compare with db and see that all hashes and volumes are listed
                                using (var rd = new Volumes.IndexVolumeReader(parsedInfo.CompressionModule, tf, m_options, hashsize))
                                    foreach (var v in rd.Volumes)
                                    {
                                        blocklinks.Add(new Tuple <string, string, long>(v.Filename, v.Hash, v.Length));
                                        using (var bl = db.CreateBlocklist(v.Filename))
                                        {
                                            foreach (var h in v.Blocks)
                                            {
                                                bl.AddBlock(h.Key, h.Value);
                                            }

                                            combined = combined.Union(bl.Compare().ToArray());
                                        }
                                    }

                                using (var il = db.CreateIndexlist(vol.Name))
                                {
                                    foreach (var t in blocklinks)
                                    {
                                        il.AddBlockLink(t.Item1, t.Item2, t.Item3);
                                    }

                                    combined = combined.Union(il.Compare()).ToList();
                                }

                                m_results.AddResult(vol.Name, combined.ToList());
                            }
                            else if (parsedInfo.FileType == RemoteVolumeType.Blocks)
                            {
                                using (var bl = db.CreateBlocklist(vol.Name))
                                    using (var rd = new Volumes.BlockVolumeReader(parsedInfo.CompressionModule, tf, m_options))
                                    {
                                        //Verify that all blocks are in the file
                                        foreach (var b in rd.Blocks)
                                        {
                                            bl.AddBlock(b.Key, b.Value);
                                        }

                                        //Select 20% random blocks and verify their hashes match the filename and size
                                        var hashsamples = new List <KeyValuePair <string, long> >(rd.Blocks);
                                        var sampleCount = Math.Min(Math.Max(0, (int)(hashsamples.Count * 0.2)), hashsamples.Count - 1);
                                        var rnd         = new Random();

                                        while (hashsamples.Count > sampleCount)
                                        {
                                            hashsamples.RemoveAt(rnd.Next(hashsamples.Count));
                                        }

                                        var blockbuffer = new byte[m_options.Blocksize];
                                        var changes     = new List <KeyValuePair <Library.Interface.TestEntryStatus, string> >();
                                        foreach (var s in hashsamples)
                                        {
                                            var size = rd.ReadBlock(s.Key, blockbuffer);
                                            if (size != s.Value)
                                            {
                                                changes.Add(new KeyValuePair <Library.Interface.TestEntryStatus, string>(Library.Interface.TestEntryStatus.Modified, s.Key));
                                            }
                                            else
                                            {
                                                var hash = Convert.ToBase64String(blockhasher.ComputeHash(blockbuffer, 0, size));
                                                if (hash != s.Key)
                                                {
                                                    changes.Add(new KeyValuePair <Library.Interface.TestEntryStatus, string>(Library.Interface.TestEntryStatus.Modified, s.Key));
                                                }
                                            }
                                        }

                                        m_results.AddResult(vol.Name, changes.Union(bl.Compare().ToList()));
                                    }
                            }
                        }

                        db.UpdateVerificationCount(vol.Name);
                    }
                    catch (Exception ex)
                    {
                        m_results.AddResult(vol.Name, new KeyValuePair <Duplicati.Library.Interface.TestEntryStatus, string>[] { new KeyValuePair <Duplicati.Library.Interface.TestEntryStatus, string>(Duplicati.Library.Interface.TestEntryStatus.Error, ex.Message) });
                        m_results.AddError(string.Format("Failed to process file {0}", vol.Name), ex);
                        if (ex is System.Threading.ThreadAbortException)
                        {
                            throw;
                        }
                    }
                }
            }
            else
            {
                foreach (var f in files)
                {
                    try
                    {
                        if (m_results.TaskControlRendevouz() == TaskControlState.Stop)
                        {
                            return;
                        }

                        backend.GetForTesting(f.Name, f.Size, f.Hash);
                        db.UpdateVerificationCount(f.Name);
                        m_results.AddResult(f.Name, new KeyValuePair <Duplicati.Library.Interface.TestEntryStatus, string> [0]);
                    }
                    catch (Exception ex)
                    {
                        m_results.AddResult(f.Name, new KeyValuePair <Duplicati.Library.Interface.TestEntryStatus, string>[] { new KeyValuePair <Duplicati.Library.Interface.TestEntryStatus, string>(Duplicati.Library.Interface.TestEntryStatus.Error, ex.Message) });
                        m_results.AddError(string.Format("Failed to process file {0}", f.Name), ex);
                        if (ex is System.Threading.ThreadAbortException)
                        {
                            throw;
                        }
                    }
                }
            }
        }
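Note: with full remote verification enabled, each sampled volume is downloaded and its contents (filelists, index block links, and a random 20% sample of blocks) are compared against the local database; without it, each file is only fetched via GetForTesting, which checks the recorded size and hash.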
Example #25
        private void RunMainOperation(Snapshots.ISnapshotService snapshot, BackendManager backend)
        {
            var filterhandler = new FilterHandler(snapshot, m_attributeFilter, m_sourceFilter, m_filter, m_symlinkPolicy, m_options.HardlinkPolicy, m_result);

            using(new Logging.Timer("BackupMainOperation"))
            {
                if (m_options.ChangedFilelist != null && m_options.ChangedFilelist.Length >= 1)
                {
                    m_result.AddVerboseMessage("Processing supplied change list instead of enumerating filesystem");
                    m_result.OperationProgressUpdater.UpdatefileCount(m_options.ChangedFilelist.Length, 0, true);

                    foreach(var p in filterhandler.Mixin(m_options.ChangedFilelist))
                    {
                        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                        {
                            m_result.AddMessage("Stopping backup operation on request");
                            break;
                        }

                        try
                        {
                            this.HandleFilesystemEntry(snapshot, backend, p, snapshot.GetAttributes(p));
                        }
                        catch (Exception ex)
                        {
                            m_result.AddWarning(string.Format("Failed to process element: {0}, message: {1}", p, ex.Message), ex);
                        }
                    }

                    m_database.AppendFilesFromPreviousSet(m_transaction, m_options.DeletedFilelist);
                }
                else
                {                                    
                    foreach(var path in filterhandler.EnumerateFilesAndFolders())
                    {
                        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                        {
                            m_result.AddMessage("Stopping backup operation on request");
                            break;
                        }

                        var fa = FileAttributes.Normal;
                        try { fa = snapshot.GetAttributes(path); }
                        catch { }

                        this.HandleFilesystemEntry(snapshot, backend, path, fa);
                    }

                }

                m_result.OperationProgressUpdater.UpdatefileCount(m_result.ExaminedFiles, m_result.SizeOfExaminedFiles, true);
            }
        }
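Note: when a changed-file list is supplied, only the listed paths are examined and the remaining entries are carried over from the previous fileset via AppendFilesFromPreviousSet (with deletions taken from the deleted-file list); otherwise the snapshot is enumerated in full.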
Example #26
        public void DoRun(Database.LocalDeleteDatabase db, System.Data.IDbTransaction transaction, bool hasVerifiedBackend, bool forceCompact)
        {
            using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
            {
                if (!hasVerifiedBackend && !m_options.NoBackendverification)
                    FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter);

                var filesetNumbers = db.FilesetTimes.Zip(Enumerable.Range(0, db.FilesetTimes.Count()), (a, b) => new Tuple<long, DateTime>(b, a.Value));
                var toDelete = m_options.GetFilesetsToDelete(db.FilesetTimes.Select(x => x.Value).ToArray());

                if (toDelete != null && toDelete.Length > 0)
                    m_result.AddMessage(string.Format("Deleting {0} remote fileset(s) ...", toDelete.Length));

                var count = 0L;
                foreach(var f in db.DropFilesetsFromTable(toDelete, transaction))
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(db, transaction);
                        return;
                    }

                    count++;
                    if (!m_options.Dryrun)
                        backend.Delete(f.Key, f.Value);
                    else
                        m_result.AddDryrunMessage(string.Format("Would delete remote fileset: {0}", f.Key));
                }

                backend.WaitForComplete(db, transaction);

                if (!m_options.Dryrun)
                {
                    if (count == 0)
                        m_result.AddMessage("No remote filesets were deleted");
                    else
                        m_result.AddMessage(string.Format("Deleted {0} remote fileset(s)", count));
                }
                else
                {

                    if (count == 0)
                        m_result.AddDryrunMessage("No remote filesets would be deleted");
                    else
                        m_result.AddDryrunMessage(string.Format("{0} remote fileset(s) would be deleted", count));

                    if (count > 0 && m_options.Dryrun)
                        m_result.AddDryrunMessage("Remove --dry-run to actually delete files");
                }

                if (!m_options.NoAutoCompact && (forceCompact || (toDelete != null && toDelete.Length > 0)))
                {
                    m_result.CompactResults = new CompactResults(m_result);
                    new CompactHandler(m_backendurl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, transaction);
                }

                m_result.SetResults(
                    from n in filesetNumbers
                    where toDelete.Contains(n.Item2)
                    select n,
                    m_options.Dryrun);
            }
        }
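Note: filesetNumbers pairs each fileset timestamp with a sequential version number, so the final SetResults call can report exactly which versions were removed.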
Example #27
        /// <summary>
        /// Helper method that verifies uploaded volumes and updates their state in the database.
        /// Throws an error if there are issues with the remote storage
        /// </summary>
        /// <param name="backend">The backend instance to use</param>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        /// <param name="log">The log instance to use</param>
        public static void VerifyRemoteList(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
        {
            var  tp           = RemoteListAnalysis(backend, options, database, log);
            long extraCount   = 0;
            long missingCount = 0;

            foreach (var n in tp.ExtraVolumes)
            {
                log.AddWarning(string.Format("Extra unknown file: {0}", n.File.Name), null);
                extraCount++;
            }

            foreach (var n in tp.MissingVolumes)
            {
                log.AddWarning(string.Format("Missing file: {0}", n.Name), null);
                missingCount++;
            }

            if (extraCount > 0)
            {
                var s = string.Format("Found {0} remote files that are not recorded in local storage, please run repair", extraCount);
                log.AddError(s, null);
                throw new Exception(s);
            }

            var lookup  = new Dictionary <string, string>();
            var doubles = new Dictionary <string, string>();

            foreach (var v in tp.ParsedVolumes)
            {
                if (lookup.ContainsKey(v.File.Name))
                {
                    doubles[v.File.Name] = null;
                }
                else
                {
                    lookup[v.File.Name] = null;
                }
            }

            if (doubles.Count > 0)
            {
                var s = string.Format("Found remote files reported as duplicates, either the backend module is broken or you need to manually remove the extra copies.\nThe following files were found multiple times: ", string.Join(", ", doubles.Keys));
                log.AddError(s, null);
                throw new Exception(s);
            }

            if (missingCount > 0)
            {
                string s;
                if (!tp.BackupPrefixes.Contains(options.Prefix) && tp.BackupPrefixes.Length > 0)
                {
                    s = string.Format("Found {0} files that are missing from the remote storage, and no files with the backup prefix {1}, but found the following backup prefixes: {2}", missingCount, options.Prefix, string.Join(", ", tp.BackupPrefixes));
                }
                else
                {
                    s = string.Format("Found {0} files that are missing from the remote storage, please run repair", missingCount);
                }

                log.AddError(s, null);
                throw new Exception(s);
            }
        }
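For reference, other examples in this listing invoke the helper before modifying the remote store, e.g. FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter); (see Examples #26 and #32).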
Example #28
        private IEnumerable<KeyValuePair<string, long>> DoDelete(LocalDeleteDatabase db, BackendManager backend, IEnumerable<IRemoteVolume> deleteableVolumes, ref System.Data.IDbTransaction transaction)
        {
            // Mark all volumes as disposable
            foreach(var f in deleteableVolumes)
                db.UpdateRemoteVolume(f.Name, RemoteVolumeState.Deleting, f.Size, f.Hash, transaction);

            // Before we commit the current state, make sure the backend has caught up
            backend.WaitForEmpty(db, transaction);

            if (!m_options.Dryrun)
            {
                transaction.Commit();
                transaction = db.BeginTransaction();
            }

            return PerformDelete(backend, db.GetDeletableVolumes(deleteableVolumes, transaction));
        }
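Because the method may commit the caller's transaction and open a fresh one, the handle is passed by ref and the caller must keep using the instance it gets back. A minimal hypothetical sketch of the calling pattern (volumes and tr are placeholder names):

            System.Data.IDbTransaction tr = db.BeginTransaction();
            // DoDelete may commit tr and replace it with a new transaction;
            // enumerating the result forces the lazy deletes in PerformDelete
            var deleted = DoDelete(db, backend, volumes, ref tr).ToList();
            tr.Commit();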
Example #29
        /// <summary>
        /// Helper method that verifies uploaded volumes and updates their state in the database.
        /// Throws an error if there are issues with the remote storage
        /// </summary>
        /// <param name="backend">The backend instance to use</param>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
        {
            var rawlist = backend.List();
            var lookup  = new Dictionary <string, Volumes.IParsedVolume>();

            var remotelist = (from n in rawlist
                              let p = Volumes.VolumeBase.ParseFilename(n)
                              where p != null && p.Prefix == options.Prefix
                              select p).ToList();

            var otherlist = (from n in rawlist
                             let p = Volumes.VolumeBase.ParseFilename(n)
                             where p != null && p.Prefix != options.Prefix
                             select p).ToList();

            var unknownlist = (from n in rawlist
                               let p = Volumes.VolumeBase.ParseFilename(n)
                               where p == null
                               select n).ToList();

            var filesets = (from n in remotelist
                            where n.FileType == RemoteVolumeType.Files
                            orderby n.Time descending
                            select n).ToList();

            log.KnownFileCount   = remotelist.Count();
            log.KnownFileSize    = remotelist.Select(x => x.File.Size).Sum();
            log.UnknownFileCount = unknownlist.Count();
            log.UnknownFileSize  = unknownlist.Select(x => x.Size).Sum();
            log.BackupListCount  = filesets.Count;
            log.LastBackupDate   = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();

            if (backend is Library.Interface.IQuotaEnabledBackend)
            {
                log.TotalQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).TotalQuotaSpace;
                log.FreeQuotaSpace  = ((Library.Interface.IQuotaEnabledBackend)backend).FreeQuotaSpace;
            }

            log.AssignedQuotaSpace = options.QuotaSize;

            foreach (var s in remotelist)
            {
                lookup[s.File.Name] = s;
            }

            var missing     = new List <RemoteVolumeEntry>();
            var missingHash = new List <Tuple <long, RemoteVolumeEntry> >();
            var locallist   = database.GetRemoteVolumes();

            foreach (var i in locallist)
            {
                Volumes.IParsedVolume r;
                var remoteFound = lookup.TryGetValue(i.Name, out r);
                var correctSize = remoteFound && i.Size >= 0 && (i.Size == r.File.Size || r.File.Size < 0);

                lookup.Remove(i.Name);

                switch (i.State)
                {
                case RemoteVolumeState.Deleted:
                    if (remoteFound)
                    {
                        log.AddMessage(string.Format("ignoring remote file listed as {0}: {1}", i.State, i.Name));
                    }

                    break;

                case RemoteVolumeState.Temporary:
                case RemoteVolumeState.Deleting:
                    if (remoteFound)
                    {
                        log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                        backend.Delete(i.Name, i.Size, true);
                    }
                    else
                    {
                        log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
                        database.RemoveRemoteVolume(i.Name, null);
                    }
                    break;

                case RemoteVolumeState.Uploading:
                    if (remoteFound && correctSize && r.File.Size >= 0)
                    {
                        log.AddMessage(string.Format("promoting uploaded complete file from {0} to {2}: {1}", i.State, i.Name, RemoteVolumeState.Uploaded));
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Uploaded, i.Size, i.Hash);
                    }
                    else if (!remoteFound)
                    {
                        log.AddMessage(string.Format("scheduling missing file for deletion, currently listed as {0}: {1}", i.State, i.Name));
                        database.RemoveRemoteVolume(i.Name, null);
                        database.RegisterRemoteVolume(i.Name, i.Type, RemoteVolumeState.Deleting, null);
                    }
                    else
                    {
                        log.AddMessage(string.Format("removing incomplete remote file listed as {0}: {1}", i.State, i.Name));
                        backend.Delete(i.Name, i.Size, true);
                    }
                    break;

                case RemoteVolumeState.Uploaded:
                    if (!remoteFound)
                    {
                        missing.Add(i);
                    }
                    else if (correctSize)
                    {
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash);
                    }
                    else
                    {
                        missingHash.Add(new Tuple <long, RemoteVolumeEntry>(r.File.Size, i));
                    }

                    break;

                case RemoteVolumeState.Verified:
                    if (!remoteFound)
                    {
                        missing.Add(i);
                    }
                    else if (!correctSize)
                    {
                        missingHash.Add(new Tuple <long, RemoteVolumeEntry>(r.File.Size, i));
                    }

                    break;

                default:
                    log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null);
                    break;
                }

                backend.FlushDbMessages();
            }


            foreach (var i in missingHash)
            {
                log.AddWarning(string.Format("remote file {1} is listed as {0} with size {2} but should be {3}, please verify the sha256 hash \"{4}\"", i.Item2.State, i.Item2.Name, i.Item1, i.Item2.Size, i.Item2.Hash), null);
            }

            return(new RemoteAnalysisResult()
            {
                ParsedVolumes = remotelist,
                OtherVolumes = otherlist,
                ExtraVolumes = lookup.Values,
                MissingVolumes = missing,
                VerificationRequiredVolumes = missingHash.Select(x => x.Item2)
            });
        }
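In summary, the analysis advances each locally recorded volume through the remote-state machine: Temporary and Deleting entries are removed (remotely if still present, otherwise just from the database), Uploading entries are promoted to Uploaded when a complete copy exists remotely and are otherwise deleted or rescheduled for deletion, Uploaded entries with the expected size are promoted to Verified, and Uploaded or Verified entries absent from the remote listing are reported as missing.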
Example #30
        private IEnumerable<KeyValuePair<string, long>> PerformDelete(BackendManager backend, IEnumerable<IRemoteVolume> list)
		{
            foreach(var f in list)
			{
				if (!m_options.Dryrun)
					backend.Delete(f.Name, f.Size);
				else
					m_result.AddDryrunMessage(string.Format("Would delete remote file: {0}, size: {1}", f.Name, Library.Utility.Utility.FormatSizeString(f.Size)));

				yield return new KeyValuePair<string, long>(f.Name, f.Size);
			}				
		}
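Note: PerformDelete is a lazy iterator, so no deletion happens until the returned sequence is enumerated; this is why callers collect the results eagerly, e.g. deletedVolumes.AddRange(DoDelete(...)) in the compacting example below.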
Example #31
        /// <summary>
        /// Run the recreate procedure
        /// </summary>
        /// <param name="path">Path to the database that will be created</param>
        /// <param name="filelistfilter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param>
        /// <param name="filenamefilter">Filters the files in a filelist to prevent downloading unwanted data</param>
        /// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be use to recover data blocks while processing blocklists</param>
        internal void DoRun(LocalDatabase dbparent, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null)
        {
            var hashalg = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
            if (hashalg == null)
                throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.BlockHashAlgorithm));
            var hashsize = hashalg.HashSize / 8;

            //We build a local database in steps.
            using(var restoredb = new LocalRecreateDatabase(dbparent, m_options))
            using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, restoredb))
            {
                var volumeIds = new Dictionary<string, long>();

                var rawlist = backend.List();

                //First step is to examine the remote storage to see what
                // kind of data we can find
                var remotefiles =
                    (from x in rawlist
                     let n = VolumeBase.ParseFilename(x)
                     where n != null && n.Prefix == m_options.Prefix
                     select n).ToArray(); //ToArray() ensures that we do not remote-request it multiple times

                if (remotefiles.Length == 0)
                {
                    if (rawlist.Count == 0)
                        throw new Exception("No files were found at the remote location, perhaps the target url is incorrect?");
                    else
                    {
                        var tmp =
                            (from x in rawlist
                             let n = VolumeBase.ParseFilename(x)
                             where n != null
                             select n.Prefix).ToArray();

                        var types = tmp.Distinct().ToArray();
                        if (tmp.Length == 0)
                            throw new Exception(string.Format("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count));
                        else if (types.Length == 1)
                            throw new Exception(string.Format("Found {0} parse-able files with the prefix {1}, did you forget to set the backup-prefix?", tmp.Length, types[0]));
                        else
                            throw new Exception(string.Format("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup-prefix?", tmp.Length, rawlist.Count, string.Join(", ", types)));
                    }
                }

                //Then we select the filelist we should work with,
                // and create the filelist table to fit
                IEnumerable<IParsedVolume> filelists =
                    from n in remotefiles
                    where n.FileType == RemoteVolumeType.Files
                    orderby n.Time descending
                    select n;

                if (filelistfilter != null)
                    filelists = filelistfilter(filelists).Select(x => x.Value).ToArray();

                foreach(var fl in remotefiles)
                    volumeIds[fl.File.Name] = restoredb.RegisterRemoteVolume(fl.File.Name, fl.FileType, RemoteVolumeState.Uploaded);

                //Record all blocksets and files needed
                using(var tr = restoredb.BeginTransaction())
                {
                    var filelistWork = (from n in filelists orderby n.Time select new RemoteVolume(n.File) as IRemoteVolume).ToList();
                    foreach(var entry in new AsyncDownloader(filelistWork, backend))
                        try
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(restoredb, null);
                                return;
                            }

                            using(var tmpfile = entry.TempFile)
                            {
                                if (entry.Hash != null && entry.Size > 0)
                                    restoredb.UpdateRemoteVolume(entry.Name, RemoteVolumeState.Verified, entry.Size, entry.Hash, tr);

                                var parsed = VolumeBase.ParseFilename(entry.Name);
                                // Create timestamped operations based on the file timestamp
                                var filesetid = restoredb.CreateFileset(volumeIds[entry.Name], parsed.Time, tr);
                                using(var filelistreader = new FilesetVolumeReader(parsed.CompressionModule, tmpfile, m_options))
                                    foreach(var fe in filelistreader.Files.Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path)))
                                    {
                                        try
                                        {
                                            if (fe.Type == FilelistEntryType.Folder)
                                            {
                                                restoredb.AddDirectoryEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else if (fe.Type == FilelistEntryType.File)
                                            {
                                                var blocksetid = restoredb.AddBlockset(fe.Hash, fe.Size, fe.BlocklistHashes, tr);
                                                restoredb.AddFileEntry(filesetid, fe.Path, fe.Time, blocksetid, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else if (fe.Type == FilelistEntryType.Symlink)
                                            {
                                                restoredb.AddSymlinkEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else
                                            {
                                                m_result.AddWarning(string.Format("Skipping file-entry with unknown type {0}: {1} ", fe.Type, fe.Path), null);
                                            }
                                        }
                                        catch (Exception ex)
                                        {
                                            m_result.AddWarning(string.Format("Failed to process file-entry: {0}", fe.Path), ex);
                                        }
                                    }
                            }
                        }
                        catch (Exception ex)
                        {
                            m_result.AddWarning(string.Format("Failed to process file: {0}", entry.Name), ex);
                            if (ex is System.Threading.ThreadAbortException)
                                throw;
                        }

                    using(new Logging.Timer("CommitUpdateFilesetFromRemote"))
                        tr.Commit();
                }

                //Grab all index files, and update the block table
                using(var tr = restoredb.BeginTransaction())
                {
                    var indexfiles =
                        from n in remotefiles
                        where n.FileType == RemoteVolumeType.Index
                        select new RemoteVolume(n.File) as IRemoteVolume;

                    foreach(var sf in new AsyncDownloader(indexfiles.ToList(), backend))
                        try
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(restoredb, null);
                                return;
                            }

                            using(var tmpfile = sf.TempFile)
                            {
                                if (sf.Hash != null && sf.Size > 0)
                                    restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Verified, sf.Size, sf.Hash, tr);

                                using(var svr = new IndexVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options, hashsize))
                                {
                                    Utility.VerifyParameters(restoredb, m_options);

                                    foreach(var a in svr.Volumes)
                                    {
                                        var volumeID = restoredb.GetRemoteVolumeID(a.Filename);
                                        //Add all block/volume mappings
                                        foreach(var b in a.Blocks)
                                            restoredb.UpdateBlock(b.Key, b.Value, volumeID, tr);

                                        restoredb.UpdateRemoteVolume(a.Filename, RemoteVolumeState.Verified, a.Length, a.Hash, tr);
                                        restoredb.AddIndexBlockLink(restoredb.GetRemoteVolumeID(sf.Name), volumeID, tr);
                                    }

                                    //If there are blocklists in the index file, update the blocklists
                                    foreach(var b in svr.BlockLists)
                                        restoredb.UpdateBlockset(b.Hash, b.Blocklist, tr);
                                }
                            }
                        }
                        catch (Exception ex)
                        {
                            //Not fatal
                            m_result.AddWarning(string.Format("Failed to process index file: {0}", sf.Name), ex);
                            if (ex is System.Threading.ThreadAbortException)
                                throw;
                        }

                    using(new Logging.Timer("CommitRecreatedDb"))
                        tr.Commit();

                    // TODO: In some cases, we can avoid downloading all index files,
                    // if we are lucky and pick the right ones
                }

                // We have now grabbed as much information as possible,
                // if we are still missing data, we must now fetch block files
                restoredb.FindMissingBlocklistHashes(hashsize, null);

                //We do this in three passes
                for(var i = 0; i < 3; i++)
                {
                    // Grab the list matching the pass type
                    var lst = restoredb.GetMissingBlockListVolumes(i).ToList();
                    foreach (var sf in new AsyncDownloader(lst, backend))
                        using (var tmpfile = sf.TempFile)
                        using (var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options))
                        using (var tr = restoredb.BeginTransaction())
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(restoredb, null);
                                return;
                            }

                            var volumeid = restoredb.GetRemoteVolumeID(sf.Name);

                            // Update the block table so we know about the block/volume map
                            foreach(var h in rd.Blocks)
                                restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr);

                            // Grab all known blocklists from the volume
                            foreach (var blocklisthash in restoredb.GetBlockLists(volumeid))
                                restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr);

                            // Update tables so we know if we are done
                            restoredb.FindMissingBlocklistHashes(hashsize, tr);

                            using(new Logging.Timer("CommitRestoredBlocklist"))
                                tr.Commit();

                            //At this point we can patch files with data from the block volume
                            if (blockprocessor != null)
                                blockprocessor(sf.Name, rd);
                        }
                }

                backend.WaitForComplete(restoredb, null);

                //All done, verify that all blocklists are fully intact;
                // if this fails, the db will not be deleted, so it can be used,
                // except to continue a backup
                restoredb.VerifyConsistency(null);
            }
        }
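The recreate flow thus proceeds in stages: list the remote files, rebuild the fileset and file tables from the filelist volumes, fill in the block/volume mappings from the index volumes, download block volumes in up to three passes to recover any blocklists that are still missing, and finally verify the consistency of the rebuilt database.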
Example #32
		internal bool DoCompact(LocalDeleteDatabase db, bool hasVerifiedBackend, ref System.Data.IDbTransaction transaction)
        {
            var report = db.GetCompactReport(m_options.VolumeSize, m_options.Threshold, m_options.SmallFileSize, m_options.SmallFileMaxCount, transaction);
            report.ReportCompactData(m_result);
			
            if (report.ShouldReclaim || report.ShouldCompact)
            {
                using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
                {
                    if (!hasVerifiedBackend && !m_options.NoBackendverification)
                        FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter);
		
                    BlockVolumeWriter newvol = new BlockVolumeWriter(m_options);
                    newvol.VolumeID = db.RegisterRemoteVolume(newvol.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, transaction);
	
                    IndexVolumeWriter newvolindex = null;
                    if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
                    {
                        newvolindex = new IndexVolumeWriter(m_options);
                        newvolindex.VolumeID = db.RegisterRemoteVolume(newvolindex.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, transaction);
                        db.AddIndexBlockLink(newvolindex.VolumeID, newvol.VolumeID, transaction);
                        newvolindex.StartVolume(newvol.RemoteFilename);
                    }
					
                    long blocksInVolume = 0;
                    long discardedBlocks = 0;
                    long discardedSize = 0;
                    byte[] buffer = new byte[m_options.Blocksize];
                    var remoteList = db.GetRemoteVolumes().Where(n => n.State == RemoteVolumeState.Uploaded || n.State == RemoteVolumeState.Verified).ToArray();
					
                    //These are for bookkeeping
                    var uploadedVolumes = new List<KeyValuePair<string, long>>();
                    var deletedVolumes = new List<KeyValuePair<string, long>>();
                    var downloadedVolumes = new List<KeyValuePair<string, long>>();
					
                    //We start by deleting unused volumes to save space before uploading new stuff
                    var fullyDeleteable = (from v in remoteList
                                           where report.DeleteableVolumes.Contains(v.Name)
                                           select (IRemoteVolume)v).ToList();                    
                    deletedVolumes.AddRange(DoDelete(db, backend, fullyDeleteable, ref transaction));

                    // This list is used to pick up unused volumes,
                    // so they can be deleted once the upload of the
                    // required fragments is complete
                    var deleteableVolumes = new List<IRemoteVolume>();

                    if (report.ShouldCompact)
                    {
                        var volumesToDownload = (from v in remoteList
                                                 where report.CompactableVolumes.Contains(v.Name)
                                                 select (IRemoteVolume)v).ToList();
						
                        using(var q = db.CreateBlockQueryHelper(m_options, transaction))
                        {
                            foreach(var entry in new AsyncDownloader(volumesToDownload, backend))
                            using(var tmpfile = entry.TempFile)
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(db, transaction);
                                    return false;
                                }
                                    
								downloadedVolumes.Add(new KeyValuePair<string, long>(entry.Name, entry.Size));
								var inst = VolumeBase.ParseFilename(entry.Name);
								using(var f = new BlockVolumeReader(inst.CompressionModule, tmpfile, m_options))
								{
									foreach(var e in f.Blocks)
									{
                                        if (q.UseBlock(e.Key, e.Value, transaction))
										{
											//TODO: How do we get the compression hint? Reverse query for filename in db?
											var s = f.ReadBlock(e.Key, buffer);
											if (s != e.Value)
												throw new Exception("Size mismatch problem, {0} vs {1}");
												
											newvol.AddBlock(e.Key, buffer, 0, s, Duplicati.Library.Interface.CompressionHint.Compressible);
											if (newvolindex != null)
												newvolindex.AddBlock(e.Key, e.Value);
												
											db.MoveBlockToNewVolume(e.Key, e.Value, newvol.VolumeID, transaction);
											blocksInVolume++;
											
											if (newvol.Filesize > m_options.VolumeSize)
											{
												uploadedVolumes.Add(new KeyValuePair<string, long>(newvol.RemoteFilename, new System.IO.FileInfo(newvol.LocalFilename).Length));
												if (newvolindex != null)
													uploadedVolumes.Add(new KeyValuePair<string, long>(newvolindex.RemoteFilename, new System.IO.FileInfo(newvolindex.LocalFilename).Length));
	
												if (!m_options.Dryrun)
													backend.Put(newvol, newvolindex);
												else
													m_result.AddDryrunMessage(string.Format("Would upload generated blockset of size {0}", Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(newvol.LocalFilename).Length)));
												
												
												newvol = new BlockVolumeWriter(m_options);
												newvol.VolumeID = db.RegisterRemoteVolume(newvol.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, transaction);
				
												if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
												{
													newvolindex = new IndexVolumeWriter(m_options);
													newvolindex.VolumeID = db.RegisterRemoteVolume(newvolindex.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, transaction);
                                                    db.AddIndexBlockLink(newvolindex.VolumeID, newvol.VolumeID, transaction);
													newvolindex.StartVolume(newvol.RemoteFilename);
												}
												
												blocksInVolume = 0;
												
												//After we upload this volume, we can delete all previous encountered volumes
												deletedVolumes.AddRange(DoDelete(db, backend, deleteableVolumes, ref transaction));
                                                deleteableVolumes = new List<IRemoteVolume>();
											}
										}
										else
										{
											discardedBlocks++;
											discardedSize += e.Value;
										}
									}
								}
	
								deleteableVolumes.Add(entry);
							}
							
							if (blocksInVolume > 0)
							{
								uploadedVolumes.Add(new KeyValuePair<string, long>(newvol.RemoteFilename, new System.IO.FileInfo(newvol.LocalFilename).Length));
								if (newvolindex != null)
									uploadedVolumes.Add(new KeyValuePair<string, long>(newvolindex.RemoteFilename, new System.IO.FileInfo(newvolindex.LocalFilename).Length));
								if (!m_options.Dryrun)
									backend.Put(newvol, newvolindex);
								else
									m_result.AddDryrunMessage(string.Format("Would upload generated blockset of size {0}", Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(newvol.LocalFilename).Length)));
							}
							else
							{
				                db.RemoveRemoteVolume(newvol.RemoteFilename, transaction);
			                    if (newvolindex != null)
			                    {
				                    db.RemoveRemoteVolume(newvolindex.RemoteFilename, transaction);
				                    newvolindex.FinishVolume(null, 0);
			                    }
							}
						}
					}
					
					deletedVolumes.AddRange(DoDelete(db, backend, deleteableVolumes, ref transaction));
										
                    var downloadSize = downloadedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a,x) => a + x.Value);
                    var deletedSize = deletedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a,x) => a + x.Value);
                    var uploadSize = uploadedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a,x) => a + x.Value);
					
                    m_result.DeletedFileCount = deletedVolumes.Count;
                    m_result.DownloadedFileCount = downloadedVolumes.Count;
                    m_result.UploadedFileCount = uploadedVolumes.Count;
                    m_result.DeletedFileSize = deletedSize;
                    m_result.DownloadedFileSize = downloadSize;
                    m_result.UploadedFileSize = uploadSize;
                    m_result.Dryrun = m_options.Dryrun;
                    
					if (m_result.Dryrun)
					{
                        if (downloadedVolumes.Count == 0)
                            m_result.AddDryrunMessage(string.Format("Would delete {0} files, which would reduce storage by {1}", m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize)));
                        else
                            m_result.AddDryrunMessage(string.Format("Would download {0} file(s) with a total size of {1}, delete {2} file(s) with a total size of {3}, and compact to {4} file(s) with a size of {5}, which would reduce storage by {6} file(s) and {7}", m_result.DownloadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DownloadedFileSize), m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize), m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.UploadedFileSize), m_result.DeletedFileCount - m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize - m_result.UploadedFileSize)));
					}
					else
					{
                        if (m_result.DownloadedFileCount == 0)
                            m_result.AddMessage(string.Format("Deleted {0} files, which reduced storage by {1}", m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize)));
                        else
                            m_result.AddMessage(string.Format("Downloaded {0} file(s) with a total size of {1}, deleted {2} file(s) with a total size of {3}, and compacted to {4} file(s) with a size of {5}, which reduced storage by {6} file(s) and {7}", m_result.DownloadedFileCount, Library.Utility.Utility.FormatSizeString(downloadSize), m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize), m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.UploadedFileSize), m_result.DeletedFileCount - m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize - m_result.UploadedFileSize)));
					}
							
					backend.WaitForComplete(db, transaction);
				}
                
                return (m_result.DeletedFileCount + m_result.UploadedFileCount) > 0;
			}
			else
			{
                return false;
			}
		}
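Note: DoCompact returns true only when the remote store was actually changed (at least one volume deleted or uploaded); when the report indicates neither reclaiming nor compacting is warranted, it returns false without contacting the backend.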
Example #33
        public void DoRun(long samples, LocalTestDatabase db, BackendManager backend)
        {
            var files = db.SelectTestTargets(samples, m_options).ToList();

            m_results.OperationProgressUpdater.UpdatePhase(OperationPhase.Verify_Running);
            m_results.OperationProgressUpdater.UpdateProgress(0);
            var progress = 0L;

            if (m_options.FullRemoteVerification)
            {
                foreach (var vol in new AsyncDownloader(files, backend))
                {
                    try
                    {
                        if (m_results.TaskControlRendevouz() == TaskControlState.Stop)
                        {
                            backend.WaitForComplete(db, null);
                            return;
                        }

                        progress++;
                        m_results.OperationProgressUpdater.UpdateProgress((float)progress / files.Count);

                        KeyValuePair<string, IEnumerable<KeyValuePair<TestEntryStatus, string>>> res;
                        using (var tf = vol.TempFile)
                            res = TestVolumeInternals(db, vol, tf, m_options, m_results, m_options.FullBlockVerification ? 1.0 : 0.2);
                        m_results.AddResult(res.Key, res.Value);

                        db.UpdateVerificationCount(vol.Name);
                    }
                    catch (Exception ex)
                    {
                        m_results.AddResult(vol.Name, new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>[] { new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>(Duplicati.Library.Interface.TestEntryStatus.Error, ex.Message) });
                        m_results.AddError(string.Format("Failed to process file {0}", vol.Name), ex);
                        if (ex is System.Threading.ThreadAbortException)
                        {
                            throw;
                        }
                    }
                }
            }
            else
            {
                foreach (var f in files)
                {
                    try
                    {
                        if (m_results.TaskControlRendevouz() == TaskControlState.Stop)
                        {
                            return;
                        }

                        progress++;
                        m_results.OperationProgressUpdater.UpdateProgress((float)progress / files.Count);

                        if (f.Size < 0 || string.IsNullOrWhiteSpace(f.Hash))
                        {
                            m_results.AddMessage(string.Format("No hash recorded for {0}, performing full verification", f.Name));
                            KeyValuePair<string, IEnumerable<KeyValuePair<TestEntryStatus, string>>> res;
                            string hash;
                            long size;

                            using (var tf = backend.GetWithInfo(f.Name, out size, out hash))
                                res = TestVolumeInternals(db, f, tf, m_options, m_results, 1);
                            m_results.AddResult(res.Key, res.Value);

                            if (res.Value != null && !res.Value.Any() && !string.IsNullOrWhiteSpace(hash))
                            {
                                if (!m_options.Dryrun)
                                {
                                    m_results.AddMessage(string.Format("Successfully captured hash for {0}, updating database", f.Name));
                                    db.UpdateRemoteVolume(f.Name, RemoteVolumeState.Verified, size, hash);
                                }
                            }
                        }
                        else
                        {
                            backend.GetForTesting(f.Name, f.Size, f.Hash);
                        }
                        db.UpdateVerificationCount(f.Name);
                        m_results.AddResult(f.Name, new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>[0]);
                    }
                    catch (Exception ex)
                    {
                        m_results.AddResult(f.Name, new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>[] { new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>(Duplicati.Library.Interface.TestEntryStatus.Error, ex.Message) });
                        m_results.AddError(string.Format("Failed to process file {0}", f.Name), ex);
                        if (ex is System.Threading.ThreadAbortException)
                        {
                            throw;
                        }
                    }
                }
            }
        }
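The third argument to TestVolumeInternals above (1.0 when full-block-verification is enabled, otherwise 0.2) is the fraction of a volume's blocks that get hash-checked. A minimal, self-contained sketch of that kind of sampling step, assuming nothing beyond the standard library (all names here are illustrative, not Duplicati's):

using System;
using System.Collections.Generic;
using System.Linq;

static class SamplingSketch
{
    // Keep roughly `rate` of the entries, chosen at random; always keep at
    // least one so a non-zero rate never degenerates to an empty sample.
    public static List<T> Sample<T>(IList<T> entries, double rate, Random rnd)
    {
        var keep = Math.Max(1, (int)(entries.Count * rate));
        return entries.OrderBy(_ => rnd.Next()).Take(keep).ToList();
    }
}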
Example #34
        private void UploadRealFileList(BackendManager backend, FilesetVolumeWriter filesetvolume)
        {
            var changeCount = 
                m_result.AddedFiles + m_result.ModifiedFiles + m_result.DeletedFiles +
                m_result.AddedFolders + m_result.ModifiedFolders + m_result.DeletedFolders +
                m_result.AddedSymlinks + m_result.ModifiedSymlinks + m_result.DeletedSymlinks;

            //Changes in the filelist trigger a filelist upload
            if (m_options.UploadUnchangedBackups || changeCount > 0)
            {
                using(new Logging.Timer("Uploading a new fileset"))
                {
                    if (!string.IsNullOrEmpty(m_options.ControlFiles))
                        foreach(var p in m_options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries))
                            filesetvolume.AddControlFile(p, m_options.GetCompressionHintFromFilename(p));

                    m_database.WriteFileset(filesetvolume, m_transaction);
                    filesetvolume.Close();

                    if (m_options.Dryrun)
                        m_result.AddDryrunMessage(string.Format("Would upload fileset volume: {0}, size: {1}", filesetvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(filesetvolume.LocalFilename).Length)));
                    else
                    {
                        m_database.UpdateRemoteVolume(filesetvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null, m_transaction);

                        using(new Logging.Timer("CommitUpdateRemoteVolume"))
                            m_transaction.Commit();
                        m_transaction = m_database.BeginTransaction();

                        backend.Put(filesetvolume);

                        using(new Logging.Timer("CommitUpdateRemoteVolume"))
                            m_transaction.Commit();
                        m_transaction = m_database.BeginTransaction();

                    }
                }
            }
            else
            {
                m_result.AddVerboseMessage("removing temp files, as no data needs to be uploaded");
                m_database.RemoveRemoteVolume(filesetvolume.RemoteFilename, m_transaction);
            }
        }
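The upload decision above reduces to a single predicate: a new fileset volume is uploaded when anything in the file list changed, or when upload-unchanged-backups forces it. A hedged restatement of that trigger (the helper name is hypothetical, not part of the codebase):

static bool ShouldUploadFileset(long changeCount, bool uploadUnchangedBackups)
{
    // Any add/modify/delete across files, folders and symlinks counts as a change.
    return uploadUnchangedBackups || changeCount > 0;
}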
Example #35
        /// <summary>
        /// Uploads the verification file.
        /// </summary>
        /// <param name="backendurl">The backend url</param>
        /// <param name="options">The options to use</param>
        /// <param name="result">The result writer</param>
        /// <param name="db">The attached database</param>
        /// <param name="transaction">An optional transaction object</param>
        public static void UploadVerificationFile(string backendurl, Options options, IBackendWriter result, LocalDatabase db, System.Data.IDbTransaction transaction)
        {
            using(var backend = new BackendManager(backendurl, options, result, db))
            using(var tempfile = new Library.Utility.TempFile())
            {
                var remotename = options.Prefix + "-verification.json";
                using(var stream = new System.IO.StreamWriter(tempfile, false, System.Text.Encoding.UTF8))
                    FilelistProcessor.CreateVerificationFile(db, stream);

                if (options.Dryrun)
                {
                    result.AddDryrunMessage(string.Format("Would upload verification file: {0}, size: {1}", remotename, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(tempfile).Length)));
                }
                else
                {
                    backend.PutUnencrypted(remotename, tempfile);
                    backend.WaitForComplete(db, transaction);
                }
            }
        }
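A minimal call-site sketch, assuming the method lives on FilelistProcessor (the class whose CreateVerificationFile it calls) and that the surrounding operation already holds the options, a result object exposing an IBackendWriter, and an open LocalDatabase; variable names are illustrative:

// Hypothetical call site: writes and uploads <prefix>-verification.json after an
// operation completes; a null transaction is allowed per the doc comment above.
FilelistProcessor.UploadVerificationFile(backendurl, options, result.BackendWriter, db, null);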
Example #36
    void Start()
    {
        _backendManager = BackendManager.Instance;

        GetCurrencySlots();
    }
Example #37
        private void DoRun(Database.LocalPurgeDatabase db, Library.Utility.IFilter filter, Action <System.Data.IDbCommand, long, string> filtercommand, float pgoffset, float pgspan)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Begin);
            Logging.Log.WriteInformationMessage(LOGTAG, "StartingPurge", "Starting purge operation");

            var doCompactStep = !m_options.NoAutoCompact && filtercommand == null;

            using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
            {
                if (db.PartiallyRecreated)
                {
                    throw new UserInformationException("The purge command does not work on partially recreated databases", "PurgeNotAllowedOnPartialDatabase");
                }

                if (db.RepairInProgress && filtercommand == null)
                {
                    throw new UserInformationException(string.Format("The purge command does not work on an incomplete database, try the {0} operation.", "purge-broken-files"), "PurgeNotAllowedOnIncompleteDatabase");
                }

                var versions = db.GetFilesetIDs(m_options.Time, m_options.Version).OrderByDescending(x => x).ToArray();
                if (versions.Length <= 0)
                {
                    throw new UserInformationException("No filesets matched the supplied time or versions", "NoFilesetFoundForTimeOrVersion");
                }

                var orphans = db.CountOrphanFiles(null);
                if (orphans != 0)
                {
                    throw new UserInformationException(string.Format("Unable to start the purge process as there are {0} orphan file(s)", orphans), "CannotPurgeWithOrphans");
                }

                Utility.UpdateOptionsFromDb(db, m_options);
                Utility.VerifyParameters(db, m_options);

                if (filtercommand == null)
                {
                    db.VerifyConsistency(m_options.Blocksize, m_options.BlockhashSize, false, null);

                    if (m_options.NoBackendverification)
                    {
                        FilelistProcessor.VerifyLocalList(backend, db);
                    }
                    else
                    {
                        FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter, null);
                    }
                }

                var filesets = db.FilesetTimes.OrderByDescending(x => x.Value).ToArray();

                var versionprogress = ((doCompactStep ? 0.75f : 1.0f) / versions.Length) * pgspan;
                var currentprogress = pgoffset;
                var progress        = 0;

                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Process);
                m_result.OperationProgressUpdater.UpdateProgress(currentprogress);

                // Reverse makes sure we re-write the old versions first
                foreach (var versionid in versions.Reverse())
                {
                    progress++;
                    Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingFilelistVolumes", "Processing filelist volume {0} of {1}", progress, versions.Length);

                    using (var tr = db.BeginTransaction())
                    {
                        var ix = -1;
                        for (var i = 0; i < filesets.Length; i++)
                        {
                            if (filesets[i].Key == versionid)
                            {
                                ix = i;
                                break;
                            }
                        }

                        if (ix < 0)
                        {
                            throw new InvalidProgramException(string.Format("Fileset was reported with id {0}, but could not be found?", versionid));
                        }

                        var secs = 0;
                        while (secs < 60)
                        {
                            secs++;
                            var tfn = Volumes.VolumeBase.GenerateFilename(RemoteVolumeType.Files, m_options, null, filesets[ix].Value.AddSeconds(secs));
                            if (db.GetRemoteVolumeID(tfn, tr) < 0)
                            {
                                break;
                            }
                        }

                        var tsOriginal = filesets[ix].Value;
                        var ts         = tsOriginal.AddSeconds(secs);

                        var prevfilename = db.GetRemoteVolumeNameForFileset(filesets[ix].Key, tr);

                        if (secs >= 60)
                        {
                            throw new Exception(string.Format("Unable to create a new fileset for {0} because the resulting timestamp {1} is more than 60 seconds away", prevfilename, ts));
                        }

                        if (ix != 0 && filesets[ix - 1].Value <= ts)
                        {
                            throw new Exception(string.Format("Unable to create a new fileset for {0} because the resulting timestamp {1} is larger than the next timestamp {2}", prevfilename, ts, filesets[ix - 1].Value));
                        }

                        using (var tempset = db.CreateTemporaryFileset(versionid, tr))
                        {
                            if (filtercommand == null)
                            {
                                tempset.ApplyFilter(filter);
                            }
                            else
                            {
                                tempset.ApplyFilter(filtercommand);
                            }

                            if (tempset.RemovedFileCount == 0)
                            {
                                Logging.Log.WriteInformationMessage(LOGTAG, "NotWritingNewFileset", "Not writing a new fileset for {0} as it was not changed", prevfilename);
                                currentprogress += versionprogress;
                                tr.Rollback();
                                continue;
                            }
                            else
                            {
                                using (var tf = new Library.Utility.TempFile())
                                    using (var vol = new Volumes.FilesetVolumeWriter(m_options, ts))
                                    {
                                        var isOriginalFilesetFullBackup = db.IsFilesetFullBackup(tsOriginal);
                                        var newids = tempset.ConvertToPermanentFileset(vol.RemoteFilename, ts, isOriginalFilesetFullBackup);
                                        vol.VolumeID = newids.Item1;
                                        vol.CreateFilesetFile(isOriginalFilesetFullBackup);

                                        Logging.Log.WriteInformationMessage(LOGTAG, "ReplacingFileset", "Replacing fileset {0} with {1} which has with {2} fewer file(s) ({3} reduction)", prevfilename, vol.RemoteFilename, tempset.RemovedFileCount, Library.Utility.Utility.FormatSizeString(tempset.RemovedFileSize));

                                        db.WriteFileset(vol, newids.Item2, tr);

                                        m_result.RemovedFileSize  += tempset.RemovedFileSize;
                                        m_result.RemovedFileCount += tempset.RemovedFileCount;
                                        m_result.RewrittenFileLists++;

                                        currentprogress += (versionprogress / 2);
                                        m_result.OperationProgressUpdater.UpdateProgress(currentprogress);

                                        if (m_options.Dryrun || m_options.FullResult)
                                        {
                                            foreach (var fe in tempset.ListAllDeletedFiles())
                                            {
                                                var msg = string.Format("  Purging file {0} ({1})", fe.Key, Library.Utility.Utility.FormatSizeString(fe.Value));

                                                Logging.Log.WriteProfilingMessage(LOGTAG, "PurgeFile", msg);
                                                Logging.Log.WriteVerboseMessage(LOGTAG, "PurgeFile", msg);

                                                if (m_options.Dryrun)
                                                {
                                                    Logging.Log.WriteDryrunMessage(LOGTAG, "WouldPurgeFile", msg);
                                                }
                                            }

                                            if (m_options.Dryrun)
                                            {
                                                Logging.Log.WriteDryrunMessage(LOGTAG, "WouldWriteRemoteFiles", "Would write files to remote storage");
                                            }

                                            Logging.Log.WriteVerboseMessage(LOGTAG, "WritingRemoteFiles", "Writing files to remote storage");
                                        }

                                        if (m_options.Dryrun)
                                        {
                                            Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadAndDelete", "Would upload file {0} ({1}) and delete file {2}, removing {3} files", vol.RemoteFilename, Library.Utility.Utility.FormatSizeString(vol.Filesize), prevfilename, tempset.RemovedFileCount);
                                            tr.Rollback();
                                        }
                                        else
                                        {
                                            var lst = db.DropFilesetsFromTable(new[] { filesets[ix].Value }, tr).ToArray();
                                            foreach (var f in lst)
                                            {
                                                db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, tr);
                                            }

                                            tr.Commit();
                                            backend.Put(vol, synchronous: true);
                                            backend.Delete(prevfilename, -1, true);
                                            backend.FlushDbMessages();
                                        }
                                    }
                            }
                        }
                    }

                    currentprogress += (versionprogress / 2);
                    m_result.OperationProgressUpdater.UpdateProgress(currentprogress);
                }


                if (doCompactStep)
                {
                    if (m_result.RewrittenFileLists == 0)
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "SkippingCompacting", "Skipping compacting as no new volumes were written");
                    }
                    else
                    {
                        m_result.OperationProgressUpdater.UpdateProgress(pgoffset + (0.75f * pgspan));
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Compact);
                        m_result.CompactResults = new CompactResults(m_result);
                        using (var cdb = new Database.LocalDeleteDatabase(db))
                        {
                            var tr = cdb.BeginTransaction();
                            try
                            {
                                new CompactHandler(backend.BackendUrl, m_options, (CompactResults)m_result.CompactResults).DoCompact(cdb, true, ref tr, backend);
                            }
                            catch
                            {
                                try { tr.Rollback(); }
                                catch { }
                            }
                            finally
                            {
                                try { tr.Commit(); }
                                catch { }
                            }
                        }
                    }

                    m_result.OperationProgressUpdater.UpdateProgress(pgoffset + pgspan);
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.PurgeFiles_Complete);
                }

                backend.WaitForComplete(db, null);
            }
        }
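The secs loop in the purge run above probes for a fileset timestamp whose generated remote filename is not already taken, bumping one second at a time and giving up after 60 attempts. The same idea in isolation, as a self-contained sketch (the delegates stand in for VolumeBase.GenerateFilename and the database lookup; nothing here is Duplicati's API):

using System;

static class TimestampProbeSketch
{
    // Returns the first timestamp within maxBumps seconds after start whose
    // generated filename is unused, or null when every candidate collides.
    public static DateTime? FindFreeTimestamp(
        DateTime start, int maxBumps,
        Func<DateTime, string> generateName,
        Func<string, bool> nameInUse)
    {
        for (var secs = 1; secs <= maxBumps; secs++)
        {
            var candidate = start.AddSeconds(secs);
            if (!nameInUse(generateName(candidate)))
                return candidate;
        }
        return null; // the caller should treat this as an error, as DoRun does
    }
}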
Example #38
        public void DoRun(long samples, LocalTestDatabase db, BackendManager backend)
        {
            var blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);

            if (blockhasher == null)
                throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.BlockHashAlgorithm));
            if (!blockhasher.CanReuseTransform)
                throw new Exception(string.Format(Strings.Foresthash.InvalidCryptoSystem, m_options.BlockHashAlgorithm));

            var hashsize = blockhasher.HashSize / 8;
            var files = db.SelectTestTargets(samples, m_options).ToList();

            if (m_options.FullRemoteVerification)
            {
                foreach(var vol in new AsyncDownloader(files, backend))
                {
                    var parsedInfo = Volumes.VolumeBase.ParseFilename(vol.Name);
                    try
                    {
                        if (m_results.TaskControlRendevouz() == TaskControlState.Stop)
                        {
                            backend.WaitForComplete(db, null);
                            return;
                        }

                        using(var tf = vol.TempFile)
                        {
                            if (parsedInfo.FileType == RemoteVolumeType.Files)
                            {
                                //Compare with db and see if all files are accounted for
                                // with correct file hashes and blocklist hashes
                                using(var fl = db.CreateFilelist(vol.Name))
                                {
                                    using(var rd = new Volumes.FilesetVolumeReader(parsedInfo.CompressionModule, tf, m_options))
                                        foreach(var f in rd.Files)
                                            fl.Add(f.Path, f.Size, f.Hash, f.Metasize, f.Metahash, f.BlocklistHashes, f.Type, f.Time);

                                    m_results.AddResult(vol.Name, fl.Compare().ToList());
                                }
                            }
                            else if (parsedInfo.FileType == RemoteVolumeType.Index)
                            {
                                var blocklinks = new List<Tuple<string, string, long>>();
                                IEnumerable<KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>> combined = new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>[0];

                                //Compare with db and see that all hashes and volumes are listed
                                using(var rd = new Volumes.IndexVolumeReader(parsedInfo.CompressionModule, tf, m_options, hashsize))
                                    foreach(var v in rd.Volumes)
                                    {
                                        blocklinks.Add(new Tuple<string, string, long>(v.Filename, v.Hash, v.Length));
                                        using(var bl = db.CreateBlocklist(v.Filename))
                                        {
                                            foreach(var h in v.Blocks)
                                                bl.AddBlock(h.Key, h.Value);

                                            combined = combined.Union(bl.Compare().ToArray());
                                        }
                                    }

                                using(var il = db.CreateIndexlist(vol.Name))
                                {
                                    foreach(var t in blocklinks)
                                        il.AddBlockLink(t.Item1, t.Item2, t.Item3);

                                    combined = combined.Union(il.Compare()).ToList();
                                }

                                m_results.AddResult(vol.Name, combined.ToList());
                            }
                            else if (parsedInfo.FileType == RemoteVolumeType.Blocks)
                            {
                                using(var bl = db.CreateBlocklist(vol.Name))
                                using(var rd = new Volumes.BlockVolumeReader(parsedInfo.CompressionModule, tf, m_options))
                                {
                                    //Verify that all blocks are in the file
                                    foreach(var b in rd.Blocks)
                                        bl.AddBlock(b.Key, b.Value);

                                    //Select 20% random blocks and verify their hashes match the filename and size
                                    var hashsamples = new List<KeyValuePair<string, long>>(rd.Blocks);
                                    var sampleCount = Math.Min(Math.Max(0, (int)(hashsamples.Count * 0.2)), hashsamples.Count - 1);
                                    var rnd = new Random();

                                    while (hashsamples.Count > sampleCount)
                                        hashsamples.RemoveAt(rnd.Next(hashsamples.Count));

                                    var blockbuffer = new byte[m_options.Blocksize];
                                    var changes = new List<KeyValuePair<Library.Interface.TestEntryStatus, string>>();
                                    foreach(var s in hashsamples)
                                    {
                                        var size = rd.ReadBlock(s.Key, blockbuffer);
                                        if (size != s.Value)
                                            changes.Add(new KeyValuePair<Library.Interface.TestEntryStatus, string>(Library.Interface.TestEntryStatus.Modified, s.Key));
                                        else
                                        {
                                            var hash = Convert.ToBase64String(blockhasher.ComputeHash(blockbuffer, 0, size));
                                            if (hash != s.Key)
                                                changes.Add(new KeyValuePair<Library.Interface.TestEntryStatus, string>(Library.Interface.TestEntryStatus.Modified, s.Key));
                                        }
                                    }

                                    m_results.AddResult(vol.Name, changes.Union(bl.Compare().ToList()));
                                }

                            }
                        }

                        db.UpdateVerificationCount(vol.Name);
                    }
                    catch (Exception ex)
                    {
                        m_results.AddResult(vol.Name, new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>[] { new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>(Duplicati.Library.Interface.TestEntryStatus.Error, ex.Message) });
                        m_results.AddError(string.Format("Failed to process file {0}", vol.Name), ex);
                        if (ex is System.Threading.ThreadAbortException)
                            throw;
                    }
                }
            }
            else
            {
                foreach(var f in files)
                {
                    try
                    {
                        if (m_results.TaskControlRendevouz() == TaskControlState.Stop)
                            return;

                        backend.GetForTesting(f.Name, f.Size, f.Hash);
                        db.UpdateVerificationCount(f.Name);
                        m_results.AddResult(f.Name, new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>[0]);
                    }
                    catch (Exception ex)
                    {
                        m_results.AddResult(f.Name, new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>[] { new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>(Duplicati.Library.Interface.TestEntryStatus.Error, ex.Message) });
                        m_results.AddError(string.Format("Failed to process file {0}", f.Name), ex);
                        if (ex is System.Threading.ThreadAbortException)
                            throw;
                    }
                }
            }
        }
Example #39
        internal bool DoCompact(LocalDeleteDatabase db, bool hasVerifiedBackend, ref System.Data.IDbTransaction transaction, BackendManager sharedBackend)
        {
            var report = db.GetCompactReport(m_options.VolumeSize, m_options.Threshold, m_options.SmallFileSize, m_options.SmallFileMaxCount, transaction);

            report.ReportCompactData();

            if (report.ShouldReclaim || report.ShouldCompact)
            {
                // Workaround where we allow a running backendmanager to be used
                using (var bk = sharedBackend == null ? new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db) : null)
                {
                    var backend = bk ?? sharedBackend;
                    if (!hasVerifiedBackend && !m_options.NoBackendverification)
                    {
                        FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter);
                    }

                    BlockVolumeWriter newvol = new BlockVolumeWriter(m_options);
                    newvol.VolumeID = db.RegisterRemoteVolume(newvol.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, transaction);

                    IndexVolumeWriter newvolindex = null;
                    if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
                    {
                        newvolindex          = new IndexVolumeWriter(m_options);
                        newvolindex.VolumeID = db.RegisterRemoteVolume(newvolindex.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, transaction);
                        db.AddIndexBlockLink(newvolindex.VolumeID, newvol.VolumeID, transaction);
                    }

                    long   blocksInVolume = 0;
                    byte[] buffer         = new byte[m_options.Blocksize];
                    var    remoteList     = db.GetRemoteVolumes().Where(n => n.State == RemoteVolumeState.Uploaded || n.State == RemoteVolumeState.Verified).ToArray();

                    //These are for bookkeeping
                    var uploadedVolumes   = new List<KeyValuePair<string, long>>();
                    var deletedVolumes    = new List<KeyValuePair<string, long>>();
                    var downloadedVolumes = new List<KeyValuePair<string, long>>();

                    //We start by deleting unused volumes to save space before uploading new stuff
                    var fullyDeleteable = (from v in remoteList
                                           where report.DeleteableVolumes.Contains(v.Name)
                                           select (IRemoteVolume)v).ToList();
                    deletedVolumes.AddRange(DoDelete(db, backend, fullyDeleteable, ref transaction));

                    // This list is used to pick up unused volumes,
                    // so they can be deleted once the upload of the
                    // required fragments is complete
                    var deleteableVolumes = new List<IRemoteVolume>();

                    if (report.ShouldCompact)
                    {
                        newvolindex?.StartVolume(newvol.RemoteFilename);
                        var volumesToDownload = (from v in remoteList
                                                 where report.CompactableVolumes.Contains(v.Name)
                                                 select (IRemoteVolume)v).ToList();

                        using (var q = db.CreateBlockQueryHelper(transaction))
                        {
                            foreach (var entry in new AsyncDownloader(volumesToDownload, backend))
                            {
                                using (var tmpfile = entry.TempFile)
                                {
                                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                    {
                                        backend.WaitForComplete(db, transaction);
                                        return false;
                                    }

                                    downloadedVolumes.Add(new KeyValuePair<string, long>(entry.Name, entry.Size));
                                    var inst = VolumeBase.ParseFilename(entry.Name);
                                    using (var f = new BlockVolumeReader(inst.CompressionModule, tmpfile, m_options))
                                    {
                                        foreach (var e in f.Blocks)
                                        {
                                            if (q.UseBlock(e.Key, e.Value, transaction))
                                            {
                                                //TODO: How do we get the compression hint? Reverse query for filename in db?
                                                var s = f.ReadBlock(e.Key, buffer);
                                                if (s != e.Value)
                                                {
                                                    throw new Exception(string.Format("Size mismatch problem for block {0}, {1} vs {2}", e.Key, s, e.Value));
                                                }

                                                newvol.AddBlock(e.Key, buffer, 0, s, Duplicati.Library.Interface.CompressionHint.Compressible);
                                                if (newvolindex != null)
                                                {
                                                    newvolindex.AddBlock(e.Key, e.Value);
                                                }

                                                db.MoveBlockToNewVolume(e.Key, e.Value, newvol.VolumeID, transaction);
                                                blocksInVolume++;

                                                if (newvol.Filesize > m_options.VolumeSize)
                                                {
                                                    uploadedVolumes.Add(new KeyValuePair<string, long>(newvol.RemoteFilename, newvol.Filesize));
                                                    if (newvolindex != null)
                                                    {
                                                        uploadedVolumes.Add(new KeyValuePair<string, long>(newvolindex.RemoteFilename, newvolindex.Filesize));
                                                    }

                                                    if (!m_options.Dryrun)
                                                    {
                                                        backend.Put(newvol, newvolindex);
                                                    }
                                                    else
                                                    {
                                                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadGeneratedBlockset", "Would upload generated blockset of size {0}", Library.Utility.Utility.FormatSizeString(newvol.Filesize));
                                                    }


                                                    newvol          = new BlockVolumeWriter(m_options);
                                                    newvol.VolumeID = db.RegisterRemoteVolume(newvol.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, transaction);

                                                    if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
                                                    {
                                                        newvolindex          = new IndexVolumeWriter(m_options);
                                                        newvolindex.VolumeID = db.RegisterRemoteVolume(newvolindex.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, transaction);
                                                        db.AddIndexBlockLink(newvolindex.VolumeID, newvol.VolumeID, transaction);
                                                        newvolindex.StartVolume(newvol.RemoteFilename);
                                                    }

                                                    blocksInVolume = 0;

                                                    //After we upload this volume, we can delete all previous encountered volumes
                                                    deletedVolumes.AddRange(DoDelete(db, backend, deleteableVolumes, ref transaction));
                                                    deleteableVolumes = new List<IRemoteVolume>();
                                                }
                                            }
                                        }
                                    }

                                    deleteableVolumes.Add(entry);
                                }
                            }

                            if (blocksInVolume > 0)
                            {
                                uploadedVolumes.Add(new KeyValuePair<string, long>(newvol.RemoteFilename, newvol.Filesize));
                                if (newvolindex != null)
                                {
                                    uploadedVolumes.Add(new KeyValuePair<string, long>(newvolindex.RemoteFilename, newvolindex.Filesize));
                                }
                                if (!m_options.Dryrun)
                                {
                                    backend.Put(newvol, newvolindex);
                                }
                                else
                                {
                                    Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadGeneratedBlockset", "Would upload generated blockset of size {0}", Library.Utility.Utility.FormatSizeString(newvol.Filesize));
                                }
                            }
                            else
                            {
                                db.RemoveRemoteVolume(newvol.RemoteFilename, transaction);
                                if (newvolindex != null)
                                {
                                    db.RemoveRemoteVolume(newvolindex.RemoteFilename, transaction);
                                    newvolindex.FinishVolume(null, 0);
                                }
                            }
                        }
                    }
                    else
                    {
                        newvolindex?.Dispose();
                        newvol.Dispose();
                    }

                    deletedVolumes.AddRange(DoDelete(db, backend, deleteableVolumes, ref transaction));

                    var downloadSize = downloadedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value);
                    var deletedSize  = deletedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value);
                    var uploadSize   = uploadedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value);

                    m_result.DeletedFileCount    = deletedVolumes.Count;
                    m_result.DownloadedFileCount = downloadedVolumes.Count;
                    m_result.UploadedFileCount   = uploadedVolumes.Count;
                    m_result.DeletedFileSize     = deletedSize;
                    m_result.DownloadedFileSize  = downloadSize;
                    m_result.UploadedFileSize    = uploadSize;
                    m_result.Dryrun = m_options.Dryrun;

                    if (m_result.Dryrun)
                    {
                        if (downloadedVolumes.Count == 0)
                        {
                            Logging.Log.WriteDryrunMessage(LOGTAG, "CompactResults", "Would delete {0} files, which would reduce storage by {1}", m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize));
                        }
                        else
                        {
                            Logging.Log.WriteDryrunMessage(LOGTAG, "CompactResults", "Would download {0} file(s) with a total size of {1}, delete {2} file(s) with a total size of {3}, and compact to {4} file(s) with a size of {5}, which would reduce storage by {6} file(s) and {7}",
                                                           m_result.DownloadedFileCount,
                                                           Library.Utility.Utility.FormatSizeString(m_result.DownloadedFileSize),
                                                           m_result.DeletedFileCount,
                                                           Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize), m_result.UploadedFileCount,
                                                           Library.Utility.Utility.FormatSizeString(m_result.UploadedFileSize),
                                                           m_result.DeletedFileCount - m_result.UploadedFileCount,
                                                           Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize - m_result.UploadedFileSize));
                        }
                    }
                    else
                    {
                        if (m_result.DownloadedFileCount == 0)
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "CompactResults", "Deleted {0} files, which reduced storage by {1}", m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize));
                        }
                        else
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "CompactResults", "Downloaded {0} file(s) with a total size of {1}, deleted {2} file(s) with a total size of {3}, and compacted to {4} file(s) with a size of {5}, which reduced storage by {6} file(s) and {7}",
                                                                m_result.DownloadedFileCount,
                                                                Library.Utility.Utility.FormatSizeString(downloadSize),
                                                                m_result.DeletedFileCount,
                                                                Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize),
                                                                m_result.UploadedFileCount,
                                                                Library.Utility.Utility.FormatSizeString(m_result.UploadedFileSize),
                                                                m_result.DeletedFileCount - m_result.UploadedFileCount,
                                                                Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize - m_result.UploadedFileSize));
                        }
                    }

                    backend.WaitForComplete(db, transaction);
                }

                m_result.EndTime = DateTime.UtcNow;
                return (m_result.DeletedFileCount + m_result.UploadedFileCount) > 0;
            }
            else
            {
                m_result.EndTime = DateTime.UtcNow;
                return false;
            }
        }
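The summary lines above report the net effect of a compact pass: files removed minus files written, and bytes deleted minus bytes uploaded. A tiny self-contained sketch of that bookkeeping, with illustrative numbers (the class and method names are hypothetical):

static class CompactMathSketch
{
    // Positive results mean the compact pass reclaimed storage.
    public static (long NetFiles, long NetBytes) NetReduction(
        long deletedCount, long deletedSize, long uploadedCount, long uploadedSize)
        => (deletedCount - uploadedCount, deletedSize - uploadedSize);
}

// e.g. deleting 10 volumes totalling 500 MiB while uploading 2 new ones
// totalling 180 MiB nets (8 files, 320 MiB):
//   CompactMathSketch.NetReduction(10, 500L << 20, 2, 180L << 20)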
Example #40
        /// <summary>
        /// Helper method that verifies uploaded volumes and updates their state in the database.
        /// Throws an error if there are issues with the remote storage
        /// </summary>
        /// <param name="backend">The backend instance to use</param>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log)
        {
            var rawlist = backend.List();
            var lookup = new Dictionary<string, Volumes.IParsedVolume>();

            var remotelist = (from n in rawlist
                              let p = Volumes.VolumeBase.ParseFilename(n)
                              where p != null && p.Prefix == options.Prefix
                              select p).ToList();

            var otherlist = (from n in rawlist
                             let p = Volumes.VolumeBase.ParseFilename(n)
                             where p != null && p.Prefix != options.Prefix
                             select p).ToList();

            var unknownlist = (from n in rawlist
                               let p = Volumes.VolumeBase.ParseFilename(n)
                               where p == null
                               select n).ToList();

            var filesets = (from n in remotelist
                            where n.FileType == RemoteVolumeType.Files
                            orderby n.Time descending
                            select n).ToList();

            log.KnownFileCount = remotelist.Count();
            log.KnownFileSize = remotelist.Select(x => x.File.Size).Sum();
            log.UnknownFileCount = unknownlist.Count();
            log.UnknownFileSize = unknownlist.Select(x => x.Size).Sum();
            log.BackupListCount = filesets.Count;
            log.LastBackupDate = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();

            if (backend is Library.Interface.IQuotaEnabledBackend)
            {
                log.TotalQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).TotalQuotaSpace;
                log.FreeQuotaSpace = ((Library.Interface.IQuotaEnabledBackend)backend).FreeQuotaSpace;
            }

            log.AssignedQuotaSpace = options.QuotaSize;

            foreach(var s in remotelist)
                lookup[s.File.Name] = s;

            var missing = new List<RemoteVolumeEntry>();
            var missingHash = new List<Tuple<long, RemoteVolumeEntry>>();
            var cleanupRemovedRemoteVolumes = new HashSet<string>();

            foreach(var e in database.DuplicateRemoteVolumes())
            {
                if (e.Value == RemoteVolumeState.Uploading || e.Value == RemoteVolumeState.Temporary)
                    database.UnlinkRemoteVolume(e.Key, e.Value);
                else
                    throw new Exception(string.Format("The remote volume {0} appears in the database with state {1} and a deleted state, cannot continue", e.Key, e.Value.ToString()));
            }

            var locallist = database.GetRemoteVolumes();
            foreach(var i in locallist)
            {
                Volumes.IParsedVolume r;
                var remoteFound = lookup.TryGetValue(i.Name, out r);
                var correctSize = remoteFound && i.Size >= 0 && (i.Size == r.File.Size || r.File.Size < 0);

                lookup.Remove(i.Name);

                switch (i.State)
                {
                    case RemoteVolumeState.Deleted:
                        if (remoteFound)
                            log.AddMessage(string.Format("ignoring remote file listed as {0}: {1}", i.State, i.Name));

                        break;

                    case RemoteVolumeState.Temporary:
                    case RemoteVolumeState.Deleting:
                        if (remoteFound)
                        {
                            log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                            backend.Delete(i.Name, i.Size, true);
                        }
                        else
                        {
                            if (i.deleteGracePeriod > DateTime.UtcNow)
                            {
                                log.AddMessage(string.Format("keeping delete request for {0} until {1}", i.Name, i.deleteGracePeriod.ToLocalTime()));
                            }
                            else
                            {
                                log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
                                cleanupRemovedRemoteVolumes.Add(i.Name);
                            }
                        }
                        break;
                    case RemoteVolumeState.Uploading:
                        if (remoteFound && correctSize && r.File.Size >= 0)
                        {
                            log.AddMessage(string.Format("promoting uploaded complete file from {0} to {2}: {1}", i.State, i.Name, RemoteVolumeState.Uploaded));
                            database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Uploaded, i.Size, i.Hash);
                        }
                        else if (!remoteFound)
                        {
                            log.AddMessage(string.Format("scheduling missing file for deletion, currently listed as {0}: {1}", i.State, i.Name));
                            cleanupRemovedRemoteVolumes.Add(i.Name);
                            database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Deleting, i.Size, i.Hash, false, TimeSpan.FromHours(2), null);
                        }
                        else
                        {
                            log.AddMessage(string.Format("removing incomplete remote file listed as {0}: {1}", i.State, i.Name));
                            backend.Delete(i.Name, i.Size, true);
                        }
                        break;

                    case RemoteVolumeState.Uploaded:
                        if (!remoteFound)
                            missing.Add(i);
                        else if (correctSize)
                            database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash);
                        else
                            missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));

                        break;

                    case RemoteVolumeState.Verified:
                        if (!remoteFound)
                            missing.Add(i);
                        else if (!correctSize)
                            missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));

                        break;

                    default:
                        log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null);
                        break;
                }

                backend.FlushDbMessages();
            }

            // cleanup deleted volumes in the DB en bloc
            database.RemoveRemoteVolumes(cleanupRemovedRemoteVolumes, null);

            foreach(var i in missingHash)
                log.AddWarning(string.Format("remote file {1} is listed as {0} with size {2} but should be {3}, please verify the sha256 hash \"{4}\"", i.Item2.State, i.Item2.Name, i.Item1, i.Item2.Size, i.Item2.Hash), null);

            return new RemoteAnalysisResult() {
                ParsedVolumes = remotelist,
                OtherVolumes = otherlist,
                ExtraVolumes = lookup.Values,
                MissingVolumes = missing,
                VerificationRequiredVolumes = missingHash.Select(x => x.Item2)
            };
        }
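For reference, the reconciliation switch above boils down to the following state table, derived from the cases as written (no additional behavior implied):

// DB state   | remote listing      | action
// -----------+---------------------+-----------------------------------------
// Deleted    | found               | ignore (log only)
// Temporary/ | found               | delete the remote file
//  Deleting  | missing             | drop the DB entry (after the grace period)
// Uploading  | found, size matches | promote to Uploaded
// Uploading  | missing             | schedule the DB entry for deletion
// Uploading  | found, wrong size   | delete the incomplete remote file
// Uploaded   | found, size matches | promote to Verified
// Uploaded/  | missing             | report as a missing volume
//  Verified  | found, wrong size   | flag for hash verification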
Example #41
        private void DoRun(LocalDatabase dbparent, Library.Utility.IFilter filter, RestoreResults result)
        {
            //In this case, we check that the remote storage fits with the database.
            //We can then query the database and find the blocks that we need to do the restore
            using(var database = new LocalRestoreDatabase(dbparent, m_options.Blocksize))
            using(var backend = new BackendManager(m_backendurl, m_options, result.BackendWriter, database))
            {
                database.SetResult(m_result);
                Utility.VerifyParameters(database, m_options);
	        	
                var blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
                var filehasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.FileHashAlgorithm);
                if (blockhasher == null)
                    throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.BlockHashAlgorithm));
                if (!blockhasher.CanReuseTransform)
                    throw new Exception(string.Format(Strings.Foresthash.InvalidCryptoSystem, m_options.BlockHashAlgorithm));

                if (filehasher == null)
                    throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.FileHashAlgorithm));
                if (!filehasher.CanReuseTransform)
                    throw new Exception(string.Format(Strings.Foresthash.InvalidCryptoSystem, m_options.FileHashAlgorithm));

                if (!m_options.NoBackendverification)
                {
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PreRestoreVerify);                
                    FilelistProcessor.VerifyRemoteList(backend, m_options, database, result.BackendWriter);
                }

                //Figure out what files are to be patched, and what blocks are needed
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_CreateFileList);
                using(new Logging.Timer("PrepareBlockList"))
                    PrepareBlockAndFileList(database, m_options, filter, result);

                //Make the entire output setup
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_CreateTargetFolders);
                using(new Logging.Timer("CreateDirectory"))
                    CreateDirectoryStructure(database, m_options, result);
                
                //If we are patching an existing target folder, do not touch stuff that is already updated
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_ScanForExistingFiles);
                using(new Logging.Timer("ScanForexistingTargetBlocks"))
                    ScanForExistingTargetBlocks(database, m_blockbuffer, blockhasher, filehasher, m_options, result);

                //Look for existing blocks in the original source files only
                using(new Logging.Timer("ScanForExistingSourceBlocksFast"))
#if DEBUG
                    if (!m_options.NoLocalBlocks && !string.IsNullOrEmpty(m_options.Restorepath))
#else
                    if (!string.IsNullOrEmpty(m_options.Restorepath))
#endif
                    {
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_ScanForLocalBlocks);
                        ScanForExistingSourceBlocksFast(database, m_options, m_blockbuffer, blockhasher, result);
                    }

                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                {
                    backend.WaitForComplete(database, null);
                    return;
                }

                // If other local files already have the blocks we want, we use them instead of downloading
                if (m_options.PatchWithLocalBlocks)
                {
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PatchWithLocalBlocks);
                    using(new Logging.Timer("PatchWithLocalBlocks"))
                        ScanForExistingSourceBlocks(database, m_options, m_blockbuffer, blockhasher, result);
                }

                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                {
                    backend.WaitForComplete(database, null);
                    return;
                }
                
                // Fill BLOCKS with remote sources
                var volumes = database.GetMissingVolumes().ToList();

                if (volumes.Count > 0)
                {
                    m_result.AddMessage(string.Format("{0} remote files are required to restore", volumes.Count));
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_DownloadingRemoteFiles);
                }

                var brokenFiles = new List<string>();
                foreach(var blockvolume in new AsyncDownloader(volumes, backend))
                    try
                    {
                        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                        {
                            backend.WaitForComplete(database, null);
                            return;
                        }

                        using(var tmpfile = blockvolume.TempFile)
                        using(var blocks = new BlockVolumeReader(GetCompressionModule(blockvolume.Name), tmpfile, m_options))
                            PatchWithBlocklist(database, blocks, m_options, result, m_blockbuffer);
                    }
                    catch (Exception ex)
                    {
                        brokenFiles.Add(blockvolume.Name);
                        result.AddError(string.Format("Failed to patch with remote file: \"{0}\", message: {1}", blockvolume.Name, ex.Message), ex);
                        if (ex is System.Threading.ThreadAbortException)
                            throw;
                    }
                
                // Reset the filehasher if it was used to verify existing files
                filehasher.Initialize();
					
                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    return;
                
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PostRestoreVerify);
                
                var fileErrors = 0L;
                // After all blocks in the files are restored, verify the file hash
                using(new Logging.Timer("RestoreVerification"))
                    foreach (var file in database.GetFilesToRestore())
                    {
                        try
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(database, null);
                                return;
                            }
                            
                            result.AddVerboseMessage("Testing restored file integrity: {0}", file.Path);
                            
                            string key;
                            long size;
                            using (var fs = m_systemIO.FileOpenRead(file.Path))
                            {
                                size = fs.Length;
                                key = Convert.ToBase64String(filehasher.ComputeHash(fs));
                            }
    
                            if (key != file.Hash)
                                throw new Exception(string.Format("Failed to restore file: \"{0}\". File hash is {1}, expected hash is {2}", file.Path, key, file.Hash));
                            result.FilesRestored++;
                            result.SizeOfRestoredFiles += size;
                        } 
                        catch (Exception ex)
                        {
                            fileErrors++;
                            result.AddWarning(ex.Message, ex);
                            if (ex is System.Threading.ThreadAbortException)
                                throw;
                        }
                    }
                    
                if (fileErrors > 0 && brokenFiles.Count > 0)
                    m_result.AddMessage(string.Format("Failed to restore {0} files, additionally the following files failed to download, which may be the cause:{1}", fileErrors, Environment.NewLine, string.Join(Environment.NewLine, brokenFiles)));

                // Drop the temp tables
                database.DropRestoreTable();
                backend.WaitForComplete(database, null);
            }
            
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_Complete);
            result.EndTime = DateTime.UtcNow;
        }
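
The verification loop above streams every restored file through the configured file hasher and compares the Base64 digest against the hash recorded in the database. Below is a minimal standalone sketch of that check, assuming SHA-256 as the file hash algorithm (Duplicati's default); the helper name is illustrative, not part of the Duplicati API.

using System;
using System.IO;
using System.Security.Cryptography;

static class RestoreVerification
{
    // Returns true when the restored file matches the expected Base64-encoded hash.
    public static bool VerifyRestoredFile(string path, string expectedHash, out long size)
    {
        using (var hasher = SHA256.Create())
        using (var fs = File.OpenRead(path))
        {
            size = fs.Length;
            var actual = Convert.ToBase64String(hasher.ComputeHash(fs));
            return actual == expectedHash;
        }
    }
}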
Example #42
0
        public void Run(IEnumerable<string> filterstrings = null, Library.Utility.IFilter compositefilter = null)
        {
            var parsedfilter = new Library.Utility.FilterExpression(filterstrings);
            var simpleList = !(parsedfilter.Type == Library.Utility.FilterType.Simple || m_options.AllVersions);
            var filter = Library.Utility.JoinedFilterExpression.Join(parsedfilter, compositefilter);

            //Use a speedy local query
            if (!m_options.NoLocalDb && System.IO.File.Exists(m_options.Dbpath))
                using(var db = new Database.LocalListDatabase(m_options.Dbpath))
                {
                    m_result.SetDatabase(db);
                    using(var filesets = db.SelectFileSets(m_options.Time, m_options.Version))
                    {
                        if (parsedfilter.Type != Library.Utility.FilterType.Empty)
                        {
                            if (simpleList || (m_options.ListFolderContents && !m_options.AllVersions))
                                filesets.TakeFirst();
                        }

                        IEnumerable<Database.LocalListDatabase.IFileversion> files;
                        if (m_options.ListFolderContents)
                            files = filesets.SelectFolderContents(filter);
                        else if (m_options.ListPrefixOnly)
                            files = filesets.GetLargestPrefix(filter);
                        else if (parsedfilter.Type == Duplicati.Library.Utility.FilterType.Empty)
                            files = null;
                        else
                            files = filesets.SelectFiles(filter);

                        if (m_options.ListSetsOnly)
                            m_result.SetResult(
                                filesets.QuickSets.Select(x => new ListResultFileset(x.Version, x.Time, x.FileCount, x.FileSizes)).ToArray(),
                                null
                            );
                        else
                            m_result.SetResult(
                                filesets.Sets.Select(x => new ListResultFileset(x.Version, x.Time, x.FileCount, x.FileSizes)).ToArray(),
                                files == null ? null :
                                    (from n in files
                                     select (Duplicati.Library.Interface.IListResultFile)(new ListResultFile(n.Path, n.Sizes.ToArray())))
                                     .ToArray()
                            );

                        return;
                    }
                }

            m_result.AddMessage("No local database, accessing remote store");

            //TODO: Add prefix and foldercontents

            // Otherwise, grab info from remote location
            using (var tmpdb = new Library.Utility.TempFile())
            using (var db = new Database.LocalDatabase(tmpdb, "List"))
            using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
            {
                m_result.SetDatabase(db);

                var filteredList = ParseAndFilterFilesets(backend.List(), m_options);
                if (filteredList.Count == 0)
                    throw new Exception("No filesets found on remote target");

                var numberSeq = CreateResultSequence(filteredList);
                if (parsedfilter.Type == Library.Utility.FilterType.Empty)
                {
                    m_result.SetResult(numberSeq, null);
                    m_result.EncryptedFiles = filteredList.Any(x => !string.IsNullOrWhiteSpace(x.Value.EncryptionModule));
                    return;
                }

                var firstEntry = filteredList[0].Value;
                filteredList.RemoveAt(0);
                Dictionary<string, List<long>> res;

                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    return;

                using (var tmpfile = backend.Get(firstEntry.File.Name, firstEntry.File.Size, null))
                using (var rd = new Volumes.FilesetVolumeReader(RestoreHandler.GetCompressionModule(firstEntry.File.Name), tmpfile, m_options))
                    if (simpleList)
                    {
                        m_result.SetResult(
                            numberSeq.Take(1),
                            (from n in rd.Files
                                  where Library.Utility.FilterExpression.Matches(filter, n.Path)
                                  orderby n.Path
                                  select new ListResultFile(n.Path, new long[] { n.Size }))
                                  .ToArray()
                        );

                        return;
                    }
                    else
                    {
                        res = rd.Files
                              .Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path))
                              .ToDictionary(
                                    x => x.Path,
                                    y =>
                                    {
                                        var lst = new List<long>();
                                        lst.Add(y.Size);
                                        return lst;
                                    },
                                    Library.Utility.Utility.ClientFilenameStringComparer
                              );
                    }

                long flindex = 1;
                foreach(var flentry in filteredList)
                    using(var tmpfile = backend.Get(flentry.Value.File.Name, flentry.Value.File.Size, null))
                    using (var rd = new Volumes.FilesetVolumeReader(flentry.Value.CompressionModule, tmpfile, m_options))
                    {
                        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            return;

                        foreach(var p in from n in rd.Files where Library.Utility.FilterExpression.Matches(filter, n.Path) select n)
                        {
                            List<long> lst;
                            if (!res.TryGetValue(p.Path, out lst))
                            {
                                lst = new List<long>();
                                res[p.Path] = lst;
                                for(var i = 0; i < flindex; i++)
                                    lst.Add(-1);
                            }

                            lst.Add(p.Size);
                        }

                        foreach(var n in from i in res where i.Value.Count < flindex + 1 select i)
                            n.Value.Add(-1);

                        flindex++;
                    }

                m_result.SetResult(
                    numberSeq,
                    from n in res
                    orderby n.Key
                    select (Duplicati.Library.Interface.IListResultFile)(new ListResultFile(n.Key, n.Value))
               );
            }
        }
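
When no local database is available, the listing above builds a per-path table of file sizes with one slot per fileset, padding with -1 wherever a path is absent from a version. A self-contained sketch of that padding scheme (all names here are illustrative):

using System.Collections.Generic;

class VersionTable
{
    private readonly Dictionary<string, List<long>> m_table = new Dictionary<string, List<long>>();
    private long m_filesetIndex;

    // Record the size of a path seen in the current fileset.
    public void Add(string path, long size)
    {
        List<long> lst;
        if (!m_table.TryGetValue(path, out lst))
        {
            lst = new List<long>();
            m_table[path] = lst;
            for (var i = 0; i < m_filesetIndex; i++)
                lst.Add(-1); // absent from all earlier filesets
        }
        lst.Add(size);
    }

    // Close out the current fileset: pad paths it did not contain.
    public void NextFileset()
    {
        foreach (var lst in m_table.Values)
            if (lst.Count < m_filesetIndex + 1)
                lst.Add(-1);
        m_filesetIndex++;
    }
}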
Example #43
0
        public void DoRun(long samples, LocalTestDatabase db, BackendManager backend)
        {
            var files = db.SelectTestTargets(samples, m_options).ToList();

            m_results.OperationProgressUpdater.UpdatePhase(OperationPhase.Verify_Running);
            m_results.OperationProgressUpdater.UpdateProgress(0);
            var progress = 0L;
            
            if (m_options.FullRemoteVerification)
            {
                foreach(var vol in new AsyncDownloader(files, backend))
                {
                    try
                    {
                        if (m_results.TaskControlRendevouz() == TaskControlState.Stop)
                        {
                            backend.WaitForComplete(db, null);
                            return;
                        }    

                        progress++;
                        m_results.OperationProgressUpdater.UpdateProgress((float)progress / files.Count);

                        KeyValuePair<string, IEnumerable<KeyValuePair<TestEntryStatus, string>>> res;
                        using(var tf = vol.TempFile)
                            res = TestVolumeInternals(db, vol, tf, m_options, m_results);
                        m_results.AddResult(res.Key, res.Value);
                        
                        db.UpdateVerificationCount(vol.Name);
                    }
                    catch (Exception ex)
                    {
                        m_results.AddResult(vol.Name, new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>[] { new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>(Duplicati.Library.Interface.TestEntryStatus.Error, ex.Message) });
                        m_results.AddError(string.Format("Failed to process file {0}", vol.Name), ex);
                        if (ex is System.Threading.ThreadAbortException)
                            throw;
                    }
                }
            }
            else
            {
                foreach(var f in files)
                {
                    try
                    {   
                        if (m_results.TaskControlRendevouz() == TaskControlState.Stop)
                            return;

                        progress++;
                        m_results.OperationProgressUpdater.UpdateProgress((float)progress / files.Count);

                        if (f.Size < 0 || string.IsNullOrWhiteSpace(f.Hash))
                        {
                            m_results.AddMessage(string.Format("No hash recorded for {0}, performing full verification", f.Name));
                            KeyValuePair<string, IEnumerable<KeyValuePair<TestEntryStatus, string>>> res;
                            string hash;
                            long size;

                            using (var tf = backend.GetWithInfo(f.Name, out size, out hash))
                                res = TestVolumeInternals(db, f, tf, m_options, m_results, 1);
                            m_results.AddResult(res.Key, res.Value);
                            
                            if (res.Value != null && !res.Value.Any() && !string.IsNullOrWhiteSpace(hash))
                            {
                                if (!m_options.Dryrun)
                                {
                                    m_results.AddMessage(string.Format("Sucessfully captured hash for {0}, updating database", f.Name));
                                    db.UpdateRemoteVolume(f.Name, RemoteVolumeState.Verified, size, hash);
                                }
                            }
                        }
                        else
                            backend.GetForTesting(f.Name, f.Size, f.Hash);
                        db.UpdateVerificationCount(f.Name);
                        m_results.AddResult(f.Name, new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>[0]);
                    }
                    catch (Exception ex)
                    {
                        m_results.AddResult(f.Name, new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>[] { new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>(Duplicati.Library.Interface.TestEntryStatus.Error, ex.Message) });
                        m_results.AddError(string.Format("Failed to process file {0}", f.Name), ex);
                        if (ex is System.Threading.ThreadAbortException)
                            throw;
                    }
                }
            }
        }
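
For volumes with a recorded size and hash, the fast path above calls backend.GetForTesting, which can reject a download on size and hash alone. Below is a condensed sketch of such a hash-only check, assuming remote volume hashes are stored as Base64-encoded SHA-256 digests (Duplicati's default); the helper is illustrative, not the actual backend API.

using System;
using System.IO;
using System.Security.Cryptography;

static class VolumeTest
{
    // Throws if the downloaded temp file does not match the recorded size and hash.
    public static void TestVolumeHash(string tempFile, long expectedSize, string expectedHash)
    {
        var fi = new FileInfo(tempFile);
        if (fi.Length != expectedSize)
            throw new Exception(string.Format("Size mismatch: got {0}, expected {1}", fi.Length, expectedSize));

        using (var hasher = SHA256.Create())
        using (var fs = fi.OpenRead())
        {
            var actual = Convert.ToBase64String(hasher.ComputeHash(fs));
            if (actual != expectedHash)
                throw new Exception(string.Format("Hash mismatch: got {0}, expected {1}", actual, expectedHash));
        }
    }
}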
Example #44
0
        private IEnumerable<KeyValuePair<string, long>> DoDelete(LocalDeleteDatabase db, BackendManager backend, List<IRemoteVolume> deleteableVolumes, System.Data.IDbTransaction transaction)
        {
            foreach (var f in db.GetDeletableVolumes(deleteableVolumes, transaction))
            {
                if (!m_options.Dryrun)
                {
                    backend.Delete(f.Name, f.Size);
                }
                else
                {
                    m_result.AddDryrunMessage(string.Format("Would delete remote file: {0}, size: {1}", f.Name, Library.Utility.Utility.FormatSizeString(f.Size)));
                }

                yield return new KeyValuePair<string, long>(f.Name, f.Size);
            }

            deleteableVolumes.Clear();
        }
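
Note that DoDelete is a C# iterator: the yield return means no deletion happens until the caller enumerates the sequence, and deleteableVolumes.Clear() only runs once enumeration completes. A caller must therefore materialize the result, as in this hypothetical call site:

// ToList() forces the deletions to actually execute (requires System.Linq).
var deleted = DoDelete(db, backend, deleteableVolumes, transaction).ToList();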
Example #45
0
        public void RunRepairRemote()
        {
            if (!System.IO.File.Exists(m_options.Dbpath))
                throw new Exception(string.Format("Database file does not exist: {0}", m_options.Dbpath));

            m_result.OperationProgressUpdater.UpdateProgress(0);

            using(var db = new LocalRepairDatabase(m_options.Dbpath))
            using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
            {
                m_result.SetDatabase(db);
                Utility.VerifyParameters(db, m_options);

                var tp = FilelistProcessor.RemoteListAnalysis(backend, m_options, db, m_result.BackendWriter);
                var buffer = new byte[m_options.Blocksize];
                var blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);

                // Validate the hasher before using it
                if (blockhasher == null)
                    throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.BlockHashAlgorithm));
                if (!blockhasher.CanReuseTransform)
                    throw new Exception(string.Format(Strings.Foresthash.InvalidCryptoSystem, m_options.BlockHashAlgorithm));

                var hashsize = blockhasher.HashSize / 8;

                var progress = 0;
                var targetProgress = tp.ExtraVolumes.Count() + tp.MissingVolumes.Count() + tp.VerificationRequiredVolumes.Count();

                if (tp.ExtraVolumes.Count() > 0 || tp.MissingVolumes.Count() > 0 || tp.VerificationRequiredVolumes.Count() > 0)
                {
                    if (m_options.Dryrun)
                    {
                        if (!tp.BackupPrefixes.Contains(m_options.Prefix) && tp.ParsedVolumes.Count() > 0)
                        {
                            if (tp.BackupPrefixes.Length == 1)
                                throw new Exception(string.Format("Found no backup files with prefix {0}, but files with prefix {1}, did you forget to set the backup-prefix?", m_options.Prefix, tp.BackupPrefixes[0]));
                            else
                                throw new Exception(string.Format("Found no backup files with prefix {0}, but files with prefixes {1}, did you forget to set the backup-prefix?", m_options.Prefix, string.Join(", ", tp.BackupPrefixes)));
                        }
                        else if (tp.ParsedVolumes.Count() == 0 && tp.ExtraVolumes.Count() > 0)
                        {
                            throw new Exception(string.Format("No files were missing, but {0} remote files were, found, did you mean to run recreate-database?", tp.ExtraVolumes.Count()));
                        }
                    }

                    if (tp.VerificationRequiredVolumes.Any())
                    {
                        using(var testdb = new LocalTestDatabase(db))
                        {
                            foreach(var n in tp.VerificationRequiredVolumes)
                                try
                                {
                                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                    {
                                        backend.WaitForComplete(db, null);
                                        return;
                                    }

                                    progress++;
                                    m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);

                                    long size;
                                    string hash;
                                    KeyValuePair<string, IEnumerable<KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>>> res;
                                   
                                    using (var tf = backend.GetWithInfo(n.Name, out size, out hash))
                                        res = TestHandler.TestVolumeInternals(testdb, n, tf, m_options, m_result, 1);

                                    if (res.Value.Any())
                                        throw new Exception(string.Format("Remote verification failure: {0}", res.Value.First()));

                                    if (!m_options.Dryrun)
                                    {
                                        m_result.AddMessage(string.Format("Sucessfully captured hash for {0}, updating database", n.Name));
                                        db.UpdateRemoteVolume(n.Name, RemoteVolumeState.Verified, size, hash);
                                    }

                                }
                                catch (Exception ex)
                                {
                                    m_result.AddError(string.Format("Failed to perform verification for file: {0}, please run verify; message: {1}", n.Name, ex.Message), ex);
                                    if (ex is System.Threading.ThreadAbortException)
                                        throw;
                                }
                        }
                    }

                    // TODO: It is actually possible to use the extra files if we parse them
                    foreach(var n in tp.ExtraVolumes)
                        try
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(db, null);
                                return;
                            }

                            progress++;
                            m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);
                        
                            if (!m_options.Dryrun)
                            {
                                db.RegisterRemoteVolume(n.File.Name, n.FileType, RemoteVolumeState.Deleting);								
                                backend.Delete(n.File.Name, n.File.Size);
                            }
                            else
                                m_result.AddDryrunMessage(string.Format("would delete file {0}", n.File.Name));
                        }
                        catch (Exception ex)
                        {
                            m_result.AddError(string.Format("Failed to perform cleanup for extra file: {0}, message: {1}", n.File.Name, ex.Message), ex);
                            if (ex is System.Threading.ThreadAbortException)
                                throw;
                        }
							
                    foreach(var n in tp.MissingVolumes)
                    {
                        IDisposable newEntry = null;
                        
                        try
                        {  
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(db, null);
                                return;
                            }    

                            progress++;
                            m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);

                            if (n.Type == RemoteVolumeType.Files)
                            {
                                var filesetId = db.GetFilesetIdFromRemotename(n.Name);
                                var w = new FilesetVolumeWriter(m_options, DateTime.UtcNow);
                                newEntry = w;
                                w.SetRemoteFilename(n.Name);
								
                                db.WriteFileset(w, null, filesetId);
	
                                w.Close();
                                if (m_options.Dryrun)
                                    m_result.AddDryrunMessage(string.Format("would re-upload fileset {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size)));
                                else
                                {
                                    db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                                    backend.Put(w);
                                }
                            }
                            else if (n.Type == RemoteVolumeType.Index)
                            {
                                var w = new IndexVolumeWriter(m_options);
                                newEntry = w;
                                w.SetRemoteFilename(n.Name);
								
                                foreach(var blockvolume in db.GetBlockVolumesFromIndexName(n.Name))
                                {								
                                    w.StartVolume(blockvolume.Name);
                                    var volumeid = db.GetRemoteVolumeID(blockvolume.Name);
									
                                    foreach(var b in db.GetBlocks(volumeid))
                                        w.AddBlock(b.Hash, b.Size);
										
                                    w.FinishVolume(blockvolume.Hash, blockvolume.Size);
                                    
                                    if (m_options.IndexfilePolicy == Options.IndexFileStrategy.Full)
                                        foreach(var b in db.GetBlocklists(volumeid, m_options.Blocksize, hashsize))
                                            w.WriteBlocklist(b.Item1, b.Item2, 0, b.Item3);
                                }
								
                                w.Close();
								
                                if (m_options.Dryrun)
                                    m_result.AddDryrunMessage(string.Format("would re-upload index file {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size)));
                                else
                                {
                                    db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                                    backend.Put(w);
                                }
                            }
                            else if (n.Type == RemoteVolumeType.Blocks)
                            {
                                var w = new BlockVolumeWriter(m_options);
                                newEntry = w;
                                w.SetRemoteFilename(n.Name);
                                
                                using(var mbl = db.CreateBlockList(n.Name))
                                {
                                    //First we grab all known blocks from local files
                                    foreach(var block in mbl.GetSourceFilesWithBlocks(m_options.Blocksize))
                                    {
                                        var hash = block.Hash;
                                        var size = (int)block.Size;
                                        
                                        foreach(var source in block.Sources)
                                        {
                                            var file = source.File;
                                            var offset = source.Offset;
                                            
                                            try
                                            {
                                                if (System.IO.File.Exists(file))
                                                    using(var f = System.IO.File.OpenRead(file))
                                                    {
                                                        f.Position = offset;
                                                        if (size == Library.Utility.Utility.ForceStreamRead(f, buffer, size))
                                                        {
                                                            var newhash = Convert.ToBase64String(blockhasher.ComputeHash(buffer, 0, size));
                                                            if (newhash == hash)
                                                            {
                                                                if (mbl.SetBlockRestored(hash, size))
                                                                    w.AddBlock(hash, buffer, 0, size, Duplicati.Library.Interface.CompressionHint.Default);
                                                                break;
                                                            }
                                                        }
                                                    }
                                            }
                                            catch (Exception ex)
                                            {
                                                m_result.AddError(string.Format("Failed to access file: {0}", file), ex);
                                            }
                                        }
                                    }
                                    
                                    //Then we grab all remote volumes that have the missing blocks
                                    foreach(var vol in new AsyncDownloader(mbl.GetMissingBlockSources().ToList(), backend))
                                    {
                                        try
                                        {
                                            using(var tmpfile = vol.TempFile)
                                            using(var f = new BlockVolumeReader(RestoreHandler.GetCompressionModule(vol.Name), tmpfile, m_options))
                                                foreach(var b in f.Blocks)
                                                    if (mbl.SetBlockRestored(b.Key, b.Value))
                                                        if (f.ReadBlock(b.Key, buffer) == b.Value)
                                                            w.AddBlock(b.Key, buffer, 0, (int)b.Value, Duplicati.Library.Interface.CompressionHint.Default);
                                        }
                                        catch (Exception ex)
                                        {
                                            m_result.AddError(string.Format("Failed to access remote file: {0}", vol.Name), ex);
                                        }
                                    }
                                    
                                    // If we managed to recover all blocks, NICE!
                                    var missingBlocks = mbl.GetMissingBlocks().Count();
                                    if (missingBlocks > 0)
                                    {                                    
                                        //TODO: How do we handle this situation?
                                        m_result.AddMessage(string.Format("Repair cannot acquire {0} required blocks for volume {1}, which are required by the following filesets: ", missingBlocks, n.Name));
                                        foreach(var f in mbl.GetFilesetsUsingMissingBlocks())
                                            m_result.AddMessage(f.Name);
                                        
                                        if (!m_options.Dryrun)
                                        {
                                            m_result.AddMessage("This may be fixed by deleting the filesets and running repair again");
                                            
                                            throw new Exception(string.Format("Repair not possible, missing {0} blocks!!!", missingBlocks));
                                        }
                                    }
                                    else
                                    {
                                        if (m_options.Dryrun)
                                            m_result.AddDryrunMessage(string.Format("would re-upload block file {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size)));
                                        else
                                        {
                                            db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                                            backend.Put(w);
                                        }
                                    }
                                }
                            }
                        }
                        catch (Exception ex)
                        {
                            if (newEntry != null)
                                try { newEntry.Dispose(); }
                                catch { }
                                finally { newEntry = null; }
                                
                            m_result.AddError(string.Format("Failed to perform cleanup for missing file: {0}, message: {1}", n.Name, ex.Message), ex);
                            
                            if (ex is System.Threading.ThreadAbortException)
                                throw;
                        }
                    }
                }
                else
                {
                    m_result.AddMessage("Destination and database are synchronized, not making any changes");
                }

                m_result.OperationProgressUpdater.UpdateProgress(1);
                backend.WaitForComplete(db, null);
                db.WriteResults();
            }
        }
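
The block-recovery loop above tries to rebuild a missing block volume from local source files before falling back to remote volumes: it seeks to the recorded offset, reads the block, and accepts it only if it still hashes to the expected value. Here is a standalone sketch of that step, assuming SHA-256 block hashes; the helper name is illustrative, and the single Read call is a simplification of the full-read loop (Library.Utility.Utility.ForceStreamRead) used above.

using System;
using System.IO;
using System.Security.Cryptography;

static class BlockRecovery
{
    // Returns true if 'buffer' now holds a block matching the expected Base64 hash.
    public static bool TryReadBlock(string file, long offset, int size, string expectedHash, byte[] buffer)
    {
        if (!File.Exists(file))
            return false;

        using (var f = File.OpenRead(file))
        using (var hasher = SHA256.Create())
        {
            f.Position = offset;
            if (f.Read(buffer, 0, size) != size)
                return false;

            return Convert.ToBase64String(hasher.ComputeHash(buffer, 0, size)) == expectedHash;
        }
    }
}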
Example #46
0
        public void Run(string[] sources, Library.Utility.IFilter filter)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Begin);

            using(m_database = new LocalBackupDatabase(m_options.Dbpath, m_options))
            {
                m_result.SetDatabase(m_database);
                m_result.Dryrun = m_options.Dryrun;

                Utility.VerifyParameters(m_database, m_options);
                m_database.VerifyConsistency(null);
                // If there is no filter, we set an empty filter to simplify the code
                // If there is a filter, we make sure that the sources are included
                m_filter = filter ?? new Library.Utility.FilterExpression();
                m_sourceFilter = new Library.Utility.FilterExpression(sources, true);

                var lastVolumeSize = -1L;
                m_backendLogFlushTimer = DateTime.Now.Add(FLUSH_TIMESPAN);
                System.Threading.Thread parallelScanner = null;

                try
                {
                    m_snapshot = GetSnapshot(sources, m_options, m_result);

                    // Start parallel scan
                    if (m_options.ChangedFilelist == null || m_options.ChangedFilelist.Length < 1)
                    {
                        parallelScanner = new System.Threading.Thread(CountFilesThread) {
                            Name = "Read ahead file counter",
                            IsBackground = true
                        };
                        parallelScanner.Start();
                    }

                    using(m_backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
                    using(m_filesetvolume = new FilesetVolumeWriter(m_options, m_database.OperationTimestamp))
                    {
                        var incompleteFilesets = m_database.GetIncompleteFilesets(null).OrderBy(x => x.Value).ToArray();
                        if (incompleteFilesets.Length != 0)
                        {
                            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreviousBackupFinalize);
                            m_result.AddMessage(string.Format("Uploading filelist from previous interrupted backup"));
                            using(var trn = m_database.BeginTransaction())
                            {
                                var incompleteSet = incompleteFilesets.Last();
                                var badIds = from n in incompleteFilesets select n.Key;

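                                // Find the newest complete fileset older than the interrupted one;
                                // it seeds the replacement fileset via AppendFilesFromPreviousSet below.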
                                var prevs = (from n in m_database.FilesetTimes
                                            where
                                                n.Key < incompleteSet.Key
                                                &&
                                                !badIds.Contains(n.Key)
                                            orderby n.Key
                                            select n.Key).ToArray();

                                var prevId = prevs.Length == 0 ? -1 : prevs.Last();

                                FilesetVolumeWriter fsw = null;
                                try
                                {
                                    var s = 1;
                                    var fileTime = incompleteSet.Value + TimeSpan.FromSeconds(s);
                                    var oldFilesetID = incompleteSet.Key;

                                    // Probe for an unused filename
                                    while (s < 60)
                                    {
                                        var id = m_database.GetRemoteVolumeID(VolumeBase.GenerateFilename(RemoteVolumeType.Files, m_options, null, fileTime));
                                        if (id < 0)
                                            break;

                                        fileTime = incompleteSet.Value + TimeSpan.FromSeconds(++s);
                                    }

                                    fsw = new FilesetVolumeWriter(m_options, fileTime);
                                    fsw.VolumeID = m_database.RegisterRemoteVolume(fsw.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary, m_transaction);
                                    var newFilesetID = m_database.CreateFileset(fsw.VolumeID, fileTime, trn);
                                    m_database.LinkFilesetToVolume(newFilesetID, fsw.VolumeID, trn);
                                    m_database.AppendFilesFromPreviousSet(trn, null, newFilesetID, prevId, fileTime);

                                    m_database.WriteFileset(fsw, trn, newFilesetID);

                                    if (m_options.Dryrun)
                                    {
                                        m_result.AddDryrunMessage(string.Format("Would upload fileset: {0}, size: {1}", fsw.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(fsw.LocalFilename).Length)));
                                    }
                                    else
                                    {
                                        m_database.UpdateRemoteVolume(fsw.RemoteFilename, RemoteVolumeState.Uploading, -1, null, trn);

                                        using(new Logging.Timer("CommitUpdateFilelistVolume"))
                                            trn.Commit();

                                        m_backend.Put(fsw);
                                        fsw = null;
                                    }
                                }
                                finally
                                {
                                    if (fsw != null)
                                        try { fsw.Dispose(); }
                                        catch { fsw = null; }
                                }
                            }
                        }

                        if (!m_options.NoBackendverification)
                        {
                            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreBackupVerify);
                            using(new Logging.Timer("PreBackupVerify"))
                            {
                                try
                                {
                                    FilelistProcessor.VerifyRemoteList(m_backend, m_options, m_database, m_result.BackendWriter);
                                }
                                catch (Exception ex)
                                {
                                    if (m_options.AutoCleanup)
                                    {
                                        m_result.AddWarning("Backend verification failed, attempting automatic cleanup", ex);
                                        m_result.RepairResults = new RepairResults(m_result);
                                        new RepairHandler(m_backend.BackendUrl, m_options, (RepairResults)m_result.RepairResults).Run();

                                        m_result.AddMessage("Backend cleanup finished, retrying verification");
                                        FilelistProcessor.VerifyRemoteList(m_backend, m_options, m_database, m_result.BackendWriter);
                                    }
                                    else
                                        throw;
                                }
                            }
                        }

                        m_database.BuildLookupTable(m_options);
                        m_transaction = m_database.BeginTransaction();

                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_ProcessingFiles);
                        var filesetvolumeid = m_database.RegisterRemoteVolume(m_filesetvolume.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary, m_transaction);
                        m_database.CreateFileset(filesetvolumeid, VolumeBase.ParseFilename(m_filesetvolume.RemoteFilename).Time, m_transaction);

                        m_blockvolume = new BlockVolumeWriter(m_options);
                        m_blockvolume.VolumeID = m_database.RegisterRemoteVolume(m_blockvolume.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, m_transaction);

                        if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
                        {
                            m_indexvolume = new IndexVolumeWriter(m_options);
                            m_indexvolume.VolumeID = m_database.RegisterRemoteVolume(m_indexvolume.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, m_transaction);
                        }

                        var filterhandler = new FilterHandler(m_snapshot, m_attributeFilter, m_sourceFilter, m_filter, m_symlinkPolicy, m_options.HardlinkPolicy, m_result);

                        using(new Logging.Timer("BackupMainOperation"))
                        {
                            if (m_options.ChangedFilelist != null && m_options.ChangedFilelist.Length >= 1)
                            {
                                m_result.AddVerboseMessage("Processing supplied change list instead of enumerating filesystem");
                                m_result.OperationProgressUpdater.UpdatefileCount(m_options.ChangedFilelist.Length, 0, true);

                                foreach(var p in m_options.ChangedFilelist)
                                {
                                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                    {
                                        m_result.AddMessage("Stopping backup operation on request");
                                        break;
                                    }

                                    var fa = default(FileAttributes);
                                    try
                                    {
                                        fa = m_snapshot.GetAttributes(p);
                                    }
                                    catch (Exception ex)
                                    {
                                        m_result.AddWarning(string.Format("Failed to read attributes: {0}, message: {1}", p, ex.Message), ex);
                                    }

                                    if (filterhandler.AttributeFilter(null, p, fa))
                                    {
                                        try
                                        {
                                            this.HandleFilesystemEntry(p, fa);
                                        }
                                        catch (Exception ex)
                                        {
                                            m_result.AddWarning(string.Format("Failed to process element: {0}, message: {1}", p, ex.Message), ex);
                                        }
                                    }
                                }

                                m_database.AppendFilesFromPreviousSet(m_transaction, m_options.DeletedFilelist);
                            }
                            else
                            {
                                foreach(var path in m_snapshot.EnumerateFilesAndFolders(filterhandler.AttributeFilter))
                                {
                                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                    {
                                        m_result.AddMessage("Stopping backup operation on request");
                                        break;
                                    }

                                    this.HandleFilesystemEntry(path, m_snapshot.GetAttributes(path));
                                }

                            }

                            //If the scanner is still running for some reason, make sure we kill it now
                            if (parallelScanner != null && parallelScanner.IsAlive)
                                parallelScanner.Abort();

                            // We no longer need to snapshot active
                            try { m_snapshot.Dispose(); }
                            finally { m_snapshot = null; }

                            m_result.OperationProgressUpdater.UpdatefileCount(m_result.ExaminedFiles, m_result.SizeOfExaminedFiles, true);
                        }

                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Finalize);
                        using(new Logging.Timer("FinalizeRemoteVolumes"))
                        {
                            if (m_blockvolume.SourceSize > 0)
                            {
                                lastVolumeSize = m_blockvolume.SourceSize;

                                if (m_options.Dryrun)
                                {
                                    m_result.AddDryrunMessage(string.Format("Would upload block volume: {0}, size: {1}", m_blockvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_blockvolume.LocalFilename).Length)));
                                    if (m_indexvolume != null)
                                    {
                                        m_blockvolume.Close();
                                        UpdateIndexVolume();
                                        m_indexvolume.FinishVolume(Library.Utility.Utility.CalculateHash(m_blockvolume.LocalFilename), new FileInfo(m_blockvolume.LocalFilename).Length);
                                        m_result.AddDryrunMessage(string.Format("Would upload index volume: {0}, size: {1}", m_indexvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_indexvolume.LocalFilename).Length)));
                                    }

                                    m_blockvolume.Dispose();
                                    m_blockvolume = null;
                                    // Guard: the index volume is null when IndexfilePolicy is None
                                    if (m_indexvolume != null)
                                    {
                                        m_indexvolume.Dispose();
                                        m_indexvolume = null;
                                    }
                                }
                                else
                                {
                                    m_database.UpdateRemoteVolume(m_blockvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null, m_transaction);
                                    m_blockvolume.Close();
                                    UpdateIndexVolume();

                                    using(new Logging.Timer("CommitUpdateRemoteVolume"))
                                        m_transaction.Commit();
                                    m_transaction = m_database.BeginTransaction();

                                    m_backend.Put(m_blockvolume, m_indexvolume);

                                    m_blockvolume = null;
                                    m_indexvolume = null;
                                }
                            }
                            else
                            {
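                                // Nothing was written to the block volume, so drop the empty
                                // temporary volumes instead of uploading them.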
                                m_database.RemoveRemoteVolume(m_blockvolume.RemoteFilename, m_transaction);
                                if (m_indexvolume != null)
                                    m_database.RemoveRemoteVolume(m_indexvolume.RemoteFilename, m_transaction);
                            }
                        }

                        using(new Logging.Timer("UpdateChangeStatistics"))
                            m_database.UpdateChangeStatistics(m_result);
                        using(new Logging.Timer("VerifyConsistency"))
                            m_database.VerifyConsistency(m_transaction);

                        var changeCount =
                            m_result.AddedFiles + m_result.ModifiedFiles + m_result.DeletedFiles +
                            m_result.AddedFolders + m_result.ModifiedFolders + m_result.DeletedFolders +
                            m_result.AddedSymlinks + m_result.ModifiedSymlinks + m_result.DeletedSymlinks;

                        //Changes in the filelist triggers a filelist upload
                        if (m_options.UploadUnchangedBackups || changeCount > 0)
                        {
                            using(new Logging.Timer("Uploading a new fileset"))
                            {
                                if (!string.IsNullOrEmpty(m_options.ControlFiles))
                                    foreach(var p in m_options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries))
                                        m_filesetvolume.AddControlFile(p, m_options.GetCompressionHintFromFilename(p));

                                m_database.WriteFileset(m_filesetvolume, m_transaction);
                                m_filesetvolume.Close();

                                if (m_options.Dryrun)
                                    m_result.AddDryrunMessage(string.Format("Would upload fileset volume: {0}, size: {1}", m_filesetvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_filesetvolume.LocalFilename).Length)));
                                else
                                {
                                    m_database.UpdateRemoteVolume(m_filesetvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null, m_transaction);

                                    using(new Logging.Timer("CommitUpdateRemoteVolume"))
                                        m_transaction.Commit();
                                    m_transaction = m_database.BeginTransaction();

                                    m_backend.Put(m_filesetvolume);
                                }
                            }
                        }
                        else
                        {
                            m_result.AddVerboseMessage("removing temp files, as no data needs to be uploaded");
                            m_database.RemoveRemoteVolume(m_filesetvolume.RemoteFilename, m_transaction);
                        }

                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload);
                        using(new Logging.Timer("Async backend wait"))
                            m_backend.WaitForComplete(m_database, m_transaction);

                        if (m_result.TaskControlRendevouz() != TaskControlState.Stop)
                        {
                            if (m_options.KeepTime.Ticks > 0 || m_options.KeepVersions != 0)
                            {
                                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Delete);
                                m_result.DeleteResults = new DeleteResults(m_result);
                                using(var db = new LocalDeleteDatabase(m_database))
                                    new DeleteHandler(m_backend.BackendUrl, m_options, (DeleteResults)m_result.DeleteResults).DoRun(db, m_transaction, true, lastVolumeSize <= m_options.SmallFileSize);

                            }
                            else if (lastVolumeSize <= m_options.SmallFileSize && !m_options.NoAutoCompact)
                            {
                                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Compact);
                                m_result.CompactResults = new CompactResults(m_result);
                                using(var db = new LocalDeleteDatabase(m_database))
                                    new CompactHandler(m_backend.BackendUrl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, m_transaction);
                            }
                        }

                        if (m_options.UploadVerificationFile)
                        {
                            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_VerificationUpload);
                            FilelistProcessor.UploadVerificationFile(m_backend.BackendUrl, m_options, m_result.BackendWriter, m_database, m_transaction);
                        }

                        if (m_options.Dryrun)
                        {
                            m_transaction.Rollback();
                            m_transaction = null;
                        }
                        else
                        {
                            using(new Logging.Timer("CommitFinalizingBackup"))
                                m_transaction.Commit();

                            m_transaction = null;
                            m_database.Vacuum();

                            if (m_result.TaskControlRendevouz() != TaskControlState.Stop && !m_options.NoBackendverification)
                            {
                                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupVerify);
                                using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
                                {
                                    using(new Logging.Timer("AfterBackupVerify"))
                                        FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter);
                                    backend.WaitForComplete(m_database, null);
                                }

                                if (m_options.BackupTestSampleCount > 0 && m_database.GetRemoteVolumes().Count() > 0)
                                {
                                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupTest);
                                    m_result.TestResults = new TestResults(m_result);

                                    using(var testdb = new LocalTestDatabase(m_database))
                                    using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, testdb))
                                        new TestHandler(m_backendurl, m_options, (TestResults)m_result.TestResults)
                                            .DoRun(m_options.BackupTestSampleCount, testdb, backend);
                                }
                            }

                        }

                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Complete);
                        m_database.WriteResults();
                        return;
                    }
                }
                catch (Exception ex)
                {
                    m_result.AddError("Fatal error", ex);
                    throw;
                }
                finally
                {
                    if (parallelScanner != null && parallelScanner.IsAlive)
                    {
                        parallelScanner.Abort();
                        parallelScanner.Join(500);
                        if (parallelScanner.IsAlive)
                            m_result.AddWarning("Failed to terminate filecounter thread", null);
                    }

                    if (m_snapshot != null)
                        try { m_snapshot.Dispose(); }
                        catch (Exception ex) { m_result.AddError(string.Format("Failed to dispose snapshot"), ex); }
                        finally { m_snapshot = null; }

                    if (m_transaction != null)
                        try { m_transaction.Rollback(); }
                        catch (Exception ex) { m_result.AddError(string.Format("Rollback error: {0}", ex.Message), ex); }
                }
            }
        }
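Note the commit-then-reopen pattern above ("CommitFinalizingBackup", and again around the volume upload in Example #50): committing and immediately starting a fresh transaction keeps the local database durable between long-running backend operations. A minimal self-contained sketch of the idiom against the generic System.Data interfaces (the helper name is illustrative, not part of Duplicati):

        using System.Data;

        static class TransactionHelper
        {
            // Commit the current transaction so the recorded state survives a
            // crash, then immediately open a fresh transaction for the next step.
            public static IDbTransaction CommitAndReopen(IDbConnection connection, IDbTransaction transaction)
            {
                transaction.Commit();
                transaction.Dispose();
                return connection.BeginTransaction();
            }
        }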
Example #47
        public void DoRun(Database.LocalDeleteDatabase db, ref System.Data.IDbTransaction transaction, bool hasVerifiedBackend, bool forceCompact)
        {
            using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
            {
                if (!hasVerifiedBackend && !m_options.NoBackendverification)
                {
                    FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter);
                }

                var filesetNumbers = db.FilesetTimes.Zip(Enumerable.Range(0, db.FilesetTimes.Count()), (a, b) => new Tuple<long, DateTime>(b, a.Value));
                var toDelete       = m_options.GetFilesetsToDelete(db.FilesetTimes.Select(x => x.Value).ToArray());

                if (toDelete != null && toDelete.Length > 0)
                {
                    m_result.AddMessage(string.Format("Deleting {0} remote fileset(s) ...", toDelete.Length));
                }

                var lst = db.DropFilesetsFromTable(toDelete, transaction).ToArray();
                foreach (var f in lst)
                {
                    db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, transaction);
                }

                if (!m_options.Dryrun)
                {
                    transaction.Commit();
                    transaction = db.BeginTransaction();
                }

                foreach (var f in lst)
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(db, transaction);
                        return;
                    }

                    if (!m_options.Dryrun)
                    {
                        backend.Delete(f.Key, f.Value);
                    }
                    else
                    {
                        m_result.AddDryrunMessage(string.Format("Would delete remote fileset: {0}", f.Key));
                    }
                }

                backend.WaitForComplete(db, transaction);

                var count = lst.Length;
                if (!m_options.Dryrun)
                {
                    if (count == 0)
                    {
                        m_result.AddMessage("No remote filesets were deleted");
                    }
                    else
                    {
                        m_result.AddMessage(string.Format("Deleted {0} remote fileset(s)", count));
                    }
                }
                else
                {
                    if (count == 0)
                    {
                        m_result.AddDryrunMessage("No remote filesets would be deleted");
                    }
                    else
                    {
                        m_result.AddDryrunMessage(string.Format("{0} remote fileset(s) would be deleted", count));
                    }

                    if (count > 0)
                    {
                        m_result.AddDryrunMessage("Remove --dry-run to actually delete files");
                    }
                }

                if (!m_options.NoAutoCompact && (forceCompact || (toDelete != null && toDelete.Length > 0)))
                {
                    m_result.CompactResults = new CompactResults(m_result);
                    new CompactHandler(m_backendurl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, ref transaction);
                }

                m_result.SetResults(
                    from n in filesetNumbers
                    where toDelete.Contains(n.Item2)
                    select n,
                    m_options.Dryrun);
            }
        }
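The delete flow above is deliberately two-phase: volumes are first marked Deleting in the database and that intent is committed, and only then are the remote files actually removed, so an interrupted run leaves a record that the next verification pass can reconcile. A minimal standalone sketch of the idiom (all names here are hypothetical stand-ins, not Duplicati APIs):

        using System;
        using System.Collections.Generic;

        class TwoPhaseDeleteSketch
        {
            static readonly List<string> PendingDeletes = new List<string>();

            // Phase 1: record the intent durably (stands in for UpdateRemoteVolume
            // with RemoteVolumeState.Deleting followed by a transaction commit).
            static void MarkAsDeleting(string name) => PendingDeletes.Add(name);

            // Phase 2: perform the actual remote deletion (stands in for backend.Delete).
            static void DeleteFromBackend(string name) => Console.WriteLine("deleting remote file: " + name);

            static void Main()
            {
                var toDelete = new[] { "duplicati-20240101T000000Z.dlist.zip" };
                foreach (var name in toDelete)
                    MarkAsDeleting(name);
                // ... commit the database transaction here ...
                foreach (var name in toDelete)
                    DeleteFromBackend(name);
            }
        }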
Example #48
        public void DoRun(Database.LocalDeleteDatabase db, ref System.Data.IDbTransaction transaction, bool hasVerifiedBackend, bool forceCompact, BackendManager sharedManager)
        {
            // Workaround that allows an already-running BackendManager to be reused
            using (var bk = sharedManager == null ? new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db) : null)
            {
                var backend = bk ?? sharedManager;

                if (!hasVerifiedBackend && !m_options.NoBackendverification)
                {
                    FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter);
                }

                var filesetNumbers = db.FilesetTimes.Zip(Enumerable.Range(0, db.FilesetTimes.Count()), (a, b) => new Tuple<long, DateTime>(b, a.Value)).ToList();
                var sets           = db.FilesetTimes.Select(x => x.Value).ToArray();
                var toDelete       = GetFilesetsToDelete(db, sets);

                if (!m_options.AllowFullRemoval && sets.Length == toDelete.Length)
                {
                    Logging.Log.WriteInformationMessage(LOGTAG, "PreventingLastFilesetRemoval", "Preventing removal of last fileset, use --{0} to allow removal ...", "allow-full-removal");
                    toDelete = toDelete.Skip(1).ToArray();
                }

                if (toDelete != null && toDelete.Length > 0)
                {
                    Logging.Log.WriteInformationMessage(LOGTAG, "DeleteRemoteFileset", "Deleting {0} remote fileset(s) ...", toDelete.Length);
                }

                var lst = db.DropFilesetsFromTable(toDelete, transaction).ToArray();
                foreach (var f in lst)
                {
                    db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, transaction);
                }

                if (!m_options.Dryrun)
                {
                    transaction.Commit();
                    transaction = db.BeginTransaction();
                }

                foreach (var f in lst)
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(db, transaction);
                        return;
                    }

                    if (!m_options.Dryrun)
                    {
                        backend.Delete(f.Key, f.Value);
                    }
                    else
                    {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteRemoteFileset", "Would delete remote fileset: {0}", f.Key);
                    }
                }

                if (sharedManager == null)
                {
                    backend.WaitForComplete(db, transaction);
                }
                else
                {
                    backend.WaitForEmpty(db, transaction);
                }

                var count = lst.Length;
                if (!m_options.Dryrun)
                {
                    if (count == 0)
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "DeleteResults", "No remote filesets were deleted");
                    }
                    else
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "DeleteResults", "Deleted {0} remote fileset(s)", count);
                    }
                }
                else
                {
                    if (count == 0)
                    {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteResults", "No remote filesets would be deleted");
                    }
                    else
                    {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteResults", "{0} remote fileset(s) would be deleted", count);
                    }

                    if (count > 0)
                    {
                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteHelp", "Remove --dry-run to actually delete files");
                    }
                }

                if (!m_options.NoAutoCompact && (forceCompact || (toDelete != null && toDelete.Length > 0)))
                {
                    m_result.CompactResults = new CompactResults(m_result);
                    new CompactHandler(m_backendurl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, ref transaction, sharedManager);
                }

                m_result.SetResults(
                    from n in filesetNumbers
                    where toDelete.Contains(n.Item2)
                    select n,
                    m_options.Dryrun);
            }
        }
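Example #48 opens with a conditional-ownership idiom: if no sharedManager is supplied, a new BackendManager is created and owned by the using-block; otherwise the shared instance is borrowed and must not be disposed here. The idiom in isolation looks like this (a sketch; the Resource type is an assumption for illustration):

        using System;

        class Resource : IDisposable
        {
            public void Dispose() => Console.WriteLine("disposed");
        }

        static class ConditionalOwnershipSketch
        {
            // Dispose the resource only if we created it: 'using' on a null
            // reference is legal in C# and simply does nothing on exit.
            public static void Work(Resource shared)
            {
                using (var owned = shared == null ? new Resource() : null)
                {
                    var resource = owned ?? shared;
                    Console.WriteLine("working with " + resource);
                }
            }
        }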
Example #49
        /// <summary>
        /// Run the recreate procedure
        /// </summary>
        /// <param name="path">Path to the database that will be created</param>
        /// <param name="filelistfilter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param>
        /// <param name="filenamefilter">Filters the files in a filelist to prevent downloading unwanted data</param>
        /// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be use to recover data blocks while processing blocklists</param>
        internal void DoRun(LocalDatabase dbparent, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null)
        {
            var hashalg = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);

            if (hashalg == null)
            {
                throw new Exception(Strings.Foresthash.InvalidHashAlgorithm(m_options.BlockHashAlgorithm));
            }
            var hashsize = hashalg.HashSize / 8;

            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Recreate_Running);

            //We build a local database in steps.
            using (var restoredb = new LocalRecreateDatabase(dbparent, m_options))
                using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, restoredb))
                {
                    var volumeIds = new Dictionary<string, long>();

                    var rawlist = backend.List();

                    //First step is to examine the remote storage to see what
                    // kind of data we can find
                    var remotefiles =
                        (from x in rawlist
                         let n = VolumeBase.ParseFilename(x)
                         where n != null && n.Prefix == m_options.Prefix
                         select n).ToArray(); //ToArray() ensures that we do not remote-request it multiple times

                    if (remotefiles.Length == 0)
                    {
                        if (rawlist.Count == 0)
                        {
                            throw new Exception("No files were found at the remote location, perhaps the target url is incorrect?");
                        }
                        else
                        {
                            var tmp =
                                (from x in rawlist
                                 let n = VolumeBase.ParseFilename(x)
                                 where n != null
                                 select n.Prefix).ToArray();

                            var types = tmp.Distinct().ToArray();
                            if (tmp.Length == 0)
                            {
                                throw new Exception(string.Format("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count));
                            }
                            else if (types.Length == 1)
                            {
                                throw new Exception(string.Format("Found {0} parse-able files with the prefix {1}, did you forget to set the backup-prefix?", tmp.Length, types[0]));
                            }
                            else
                            {
                                throw new Exception(string.Format("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup-prefix?", tmp.Length, rawlist.Count, string.Join(", ", types)));
                            }
                        }
                    }

                    //Then we select the filelist we should work with,
                    // and create the filelist table to fit
                    IEnumerable <IParsedVolume> filelists =
                        from n in remotefiles
                        where n.FileType == RemoteVolumeType.Files
                        orderby n.Time descending
                        select n;

                    if (filelistfilter != null)
                    {
                        filelists = filelistfilter(filelists).Select(x => x.Value).ToArray();
                    }

                    foreach (var fl in remotefiles)
                    {
                        volumeIds[fl.File.Name] = restoredb.RegisterRemoteVolume(fl.File.Name, fl.FileType, RemoteVolumeState.Uploaded);
                    }


                    //Record all blocksets and files needed
                    using (var tr = restoredb.BeginTransaction())
                    {
                        var filelistWork = (from n in filelists orderby n.Time select new RemoteVolume(n.File) as IRemoteVolume).ToList();
                        var progress     = 0;

                        foreach (var entry in new AsyncDownloader(filelistWork, backend))
                        {
                            try
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(restoredb, null);
                                    return;
                                }

                                progress++;
                                m_result.OperationProgressUpdater.UpdateProgress(((float)progress / filelistWork.Count()) * 0.2f);

                                using (var tmpfile = entry.TempFile)
                                {
                                    if (entry.Hash != null && entry.Size > 0)
                                    {
                                        restoredb.UpdateRemoteVolume(entry.Name, RemoteVolumeState.Verified, entry.Size, entry.Hash, tr);
                                    }

                                    var parsed = VolumeBase.ParseFilename(entry.Name);
                                    // Create timestamped operations based on the file timestamp
                                    var filesetid = restoredb.CreateFileset(volumeIds[entry.Name], parsed.Time, tr);
                                    using (var filelistreader = new FilesetVolumeReader(parsed.CompressionModule, tmpfile, m_options))
                                        foreach (var fe in filelistreader.Files.Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path)))
                                        {
                                            try
                                            {
                                                if (fe.Type == FilelistEntryType.Folder)
                                                {
                                                    restoredb.AddDirectoryEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                                }
                                                else if (fe.Type == FilelistEntryType.File)
                                                {
                                                    var blocksetid = restoredb.AddBlockset(fe.Hash, fe.Size, fe.BlocklistHashes, tr);
                                                    restoredb.AddFileEntry(filesetid, fe.Path, fe.Time, blocksetid, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                                }
                                                else if (fe.Type == FilelistEntryType.Symlink)
                                                {
                                                    restoredb.AddSymlinkEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                                }
                                                else
                                                {
                                                    m_result.AddWarning(string.Format("Skipping file-entry with unknown type {0}: {1} ", fe.Type, fe.Path), null);
                                                }
                                            }
                                            catch (Exception ex)
                                            {
                                                m_result.AddWarning(string.Format("Failed to process file-entry: {0}", fe.Path), ex);
                                            }
                                        }
                                }
                            }
                            catch (Exception ex)
                            {
                                m_result.AddWarning(string.Format("Failed to process file: {0}", entry.Name), ex);
                                if (ex is System.Threading.ThreadAbortException)
                                {
                                    throw;
                                }
                            }
                        }

                        using (new Logging.Timer("CommitUpdateFilesetFromRemote"))
                            tr.Commit();
                    }

                    //Grab all index files, and update the block table
                    using (var tr = restoredb.BeginTransaction())
                    {
                        var indexfiles = (
                            from n in remotefiles
                            where n.FileType == RemoteVolumeType.Index
                            select new RemoteVolume(n.File) as IRemoteVolume).ToList();

                        var progress = 0;

                        foreach (var sf in new AsyncDownloader(indexfiles, backend))
                        {
                            try
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(restoredb, null);
                                    return;
                                }

                                progress++;
                                m_result.OperationProgressUpdater.UpdateProgress((((float)progress / indexfiles.Count) * 0.5f) + 0.2f);

                                using (var tmpfile = sf.TempFile)
                                {
                                    if (sf.Hash != null && sf.Size > 0)
                                    {
                                        restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Verified, sf.Size, sf.Hash, tr);
                                    }

                                    using (var svr = new IndexVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options, hashsize))
                                    {
                                        Utility.VerifyParameters(restoredb, m_options);

                                        foreach (var a in svr.Volumes)
                                        {
                                            var volumeID = restoredb.GetRemoteVolumeID(a.Filename);
                                            //Add all block/volume mappings
                                            foreach (var b in a.Blocks)
                                            {
                                                restoredb.UpdateBlock(b.Key, b.Value, volumeID, tr);
                                            }

                                            restoredb.UpdateRemoteVolume(a.Filename, RemoteVolumeState.Verified, a.Length, a.Hash, tr);
                                            restoredb.AddIndexBlockLink(restoredb.GetRemoteVolumeID(sf.Name), volumeID, tr);
                                        }

                                        //If there are blocklists in the index file, update the blocklists
                                        foreach (var b in svr.BlockLists)
                                        {
                                            restoredb.UpdateBlockset(b.Hash, b.Blocklist, tr);
                                        }
                                    }
                                }
                            }
                            catch (Exception ex)
                            {
                                //Not fatal
                                m_result.AddWarning(string.Format("Failed to process index file: {0}", sf.Name), ex);
                                if (ex is System.Threading.ThreadAbortException)
                                {
                                    throw;
                                }
                            }
                        }

                        using (new Logging.Timer("CommitRecreatedDb"))
                            tr.Commit();

                        // TODO: In some cases, we can avoid downloading all index files,
                        // if we are lucky and pick the right ones
                    }

                    // We have now grabbed as much information as possible,
                    // if we are still missing data, we must now fetch block files
                    restoredb.FindMissingBlocklistHashes(hashsize, null);

                    //We do this in three passes
                    for (var i = 0; i < 3; i++)
                    {
                        // Grab the list matching the pass type
                        var lst = restoredb.GetMissingBlockListVolumes(i).ToList();
                        if (lst.Count > 0)
                        {
                            switch (i)
                            {
                            case 0:
                                if (m_options.Verbose)
                                {
                                    m_result.AddVerboseMessage("Processing required {0} blocklist volumes: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                }
                                else
                                {
                                    m_result.AddMessage(string.Format("Processing required {0} blocklist volumes", lst.Count));
                                }
                                break;

                            case 1:
                                if (m_options.Verbose)
                                {
                                    m_result.AddVerboseMessage("Probing {0} candidate blocklist volumes: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                }
                                else
                                {
                                    m_result.AddMessage(string.Format("Probing {0} candidate blocklist volumes", lst.Count));
                                }
                                break;

                            default:
                                if (m_options.Verbose)
                                {
                                    m_result.AddVerboseMessage("Processing all of the {0} volumes for blocklists: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                }
                                else
                                {
                                    m_result.AddMessage(string.Format("Processing all of the {0} volumes for blocklists", lst.Count));
                                }
                                break;
                            }
                        }

                        var progress = 0;
                        foreach (var sf in new AsyncDownloader(lst, backend))
                        {
                            using (var tmpfile = sf.TempFile)
                                using (var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options))
                                    using (var tr = restoredb.BeginTransaction())
                                    {
                                        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                        {
                                            backend.WaitForComplete(restoredb, null);
                                            return;
                                        }

                                        progress++;
                                        m_result.OperationProgressUpdater.UpdateProgress((((float)progress / lst.Count) * 0.1f) + 0.7f + (i * 0.1f));

                                        var volumeid = restoredb.GetRemoteVolumeID(sf.Name);

                                        restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Uploaded, sf.Size, sf.Hash, tr);

                                        // Update the block table so we know about the block/volume map
                                        foreach (var h in rd.Blocks)
                                        {
                                            restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr);
                                        }

                                        // Grab all known blocklists from the volume
                                        foreach (var blocklisthash in restoredb.GetBlockLists(volumeid))
                                        {
                                            restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr);
                                        }

                                        // Update tables so we know if we are done
                                        restoredb.FindMissingBlocklistHashes(hashsize, tr);

                                        using (new Logging.Timer("CommitRestoredBlocklist"))
                                            tr.Commit();

                                        //At this point we can patch files with data from the block volume
                                        if (blockprocessor != null)
                                        {
                                            blockprocessor(sf.Name, rd);
                                        }
                                    }
                        }
                    }

                    backend.WaitForComplete(restoredb, null);

                    //All done; we must verify that every blocklist is fully intact.
                    // If this fails, the db will not be deleted, so it can still be used,
                    // except to continue a backup
                    restoredb.VerifyConsistency(null, m_options.Blocksize, m_options.BlockhashSize);
                }
        }
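The recreate procedure reports progress on a fixed budget: filelist downloads fill 0.0-0.2, index files 0.2-0.7, and each of the three block-volume passes fills a 0.1-wide slice starting at 0.7. A small helper restating the arithmetic of the UpdateProgress calls above (a reading of the code, not part of the original):

        static class RecreateProgressSketch
        {
            // Filelist downloads: 0.0 - 0.2
            public static float FilelistProgress(int done, int total) =>
                (float)done / total * 0.2f;

            // Index file downloads: 0.2 - 0.7
            public static float IndexProgress(int done, int total) =>
                (float)done / total * 0.5f + 0.2f;

            // Block-volume pass i (0..2): from 0.7 + i*0.1 up to 0.8 + i*0.1
            public static float BlockPassProgress(int pass, int done, int total) =>
                (float)done / total * 0.1f + 0.7f + pass * 0.1f;
        }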
Example #50
        private long FinalizeRemoteVolumes(BackendManager backend)
        {
            var lastVolumeSize = -1L;
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Finalize);
            using(new Logging.Timer("FinalizeRemoteVolumes"))
            {
                if (m_blockvolume != null && m_blockvolume.SourceSize > 0)
                {
                    lastVolumeSize = m_blockvolume.SourceSize;

                    if (m_options.Dryrun)
                    {
                        m_result.AddDryrunMessage(string.Format("Would upload block volume: {0}, size: {1}", m_blockvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_blockvolume.LocalFilename).Length)));
                        if (m_indexvolume != null)
                        {
                            m_blockvolume.Close();
                            UpdateIndexVolume();
                            m_indexvolume.FinishVolume(Library.Utility.Utility.CalculateHash(m_blockvolume.LocalFilename), new FileInfo(m_blockvolume.LocalFilename).Length);
                            m_result.AddDryrunMessage(string.Format("Would upload index volume: {0}, size: {1}", m_indexvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_indexvolume.LocalFilename).Length)));
                        }

                        m_blockvolume.Dispose();
                        m_blockvolume = null;
                        if (m_indexvolume != null)
                        {
                            m_indexvolume.Dispose();
                            m_indexvolume = null;
                        }
                    }
                    else
                    {
                        m_database.UpdateRemoteVolume(m_blockvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null, m_transaction);
                        m_blockvolume.Close();
                        UpdateIndexVolume();

                        using(new Logging.Timer("CommitUpdateRemoteVolume"))
                            m_transaction.Commit();
                        m_transaction = m_database.BeginTransaction();

                        backend.Put(m_blockvolume, m_indexvolume);

                        using(new Logging.Timer("CommitUpdateRemoteVolume"))
                            m_transaction.Commit();
                        m_transaction = m_database.BeginTransaction();

                        m_blockvolume = null;
                        m_indexvolume = null;
                    }
                }
            }

            return lastVolumeSize;
        }
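Both branches above size the finished volumes with new FileInfo(...).Length and render them via Library.Utility.Utility.FormatSizeString. For readers without the Duplicati sources at hand, a plausible stand-in for such a formatter looks like this (an illustrative assumption, not Duplicati's implementation):

        using System;

        static class SizeFormatSketch
        {
            // Human-readable byte count, e.g. 1536 -> "1.5 KB".
            public static string Format(long bytes)
            {
                string[] units = { "bytes", "KB", "MB", "GB", "TB" };
                double size = bytes;
                int unit = 0;
                while (size >= 1024 && unit < units.Length - 1)
                {
                    size /= 1024;
                    unit++;
                }
                return string.Format("{0:0.##} {1}", size, units[unit]);
            }
        }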
Example #51
        public void Run(IEnumerable <string> filterstrings = null, Library.Utility.IFilter compositefilter = null)
        {
            using (var tmpdb = new Library.Utility.TempFile())
                using (var db = new Database.LocalDatabase(System.IO.File.Exists(m_options.Dbpath) ? m_options.Dbpath : (string)tmpdb, "ListControlFiles", true))
                    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
                    {
                        m_result.SetDatabase(db);

                        var filter = Library.Utility.JoinedFilterExpression.Join(new Library.Utility.FilterExpression(filterstrings), compositefilter);

                        try
                        {
                            var filteredList = ListFilesHandler.ParseAndFilterFilesets(backend.List(), m_options);
                            if (filteredList.Count == 0)
                            {
                                throw new Exception("No filesets found on remote target");
                            }

                            Exception lastEx = new Exception("No suitable files found on remote target");

                            foreach (var fileversion in filteredList)
                            {
                                try
                                {
                                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                    {
                                        return;
                                    }

                                    var file  = fileversion.Value.File;
                                    var entry = db.GetRemoteVolume(file.Name);

                                    var files = new List <Library.Interface.IListResultFile>();
                                    using (var tmpfile = backend.Get(file.Name, entry.Size < 0 ? file.Size : entry.Size, entry.Hash))
                                        using (var tmp = new Volumes.FilesetVolumeReader(RestoreHandler.GetCompressionModule(file.Name), tmpfile, m_options))
                                            foreach (var cf in tmp.ControlFiles)
                                            {
                                                if (Library.Utility.FilterExpression.Matches(filter, cf.Key))
                                                {
                                                    files.Add(new ListResultFile(cf.Key, null));
                                                }
                                            }

                                    m_result.SetResult(new Library.Interface.IListResultFileset[] { new ListResultFileset(fileversion.Key, LocalDatabase.BackupType.PARTIAL_BACKUP, fileversion.Value.Time, -1, -1) }, files);
                                    lastEx = null;
                                    break;
                                }
                                catch (Exception ex)
                                {
                                    lastEx = ex;
                                    if (ex is System.Threading.ThreadAbortException)
                                    {
                                        throw;
                                    }
                                }
                            }

                            if (lastEx != null)
                            {
                                throw lastEx;
                            }
                        }
                        finally
                        {
                            backend.WaitForComplete(db, null);
                        }
                    }
        }
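Note the size fallback in the backend.Get call: the database's verified size is preferred, and the size from the remote listing is used only when the volume has never been verified (recorded as -1). Extracted as a one-line helper (hypothetical, for clarity only):

        static class VolumeSizeSketch
        {
            // Prefer the database's verified size; fall back to the size reported
            // by the remote listing when the volume is still unverified (-1).
            public static long EffectiveSize(long dbSize, long listedSize) =>
                dbSize < 0 ? listedSize : dbSize;
        }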
Example #52
        public void Run(string baseVersion, string compareVersion, IEnumerable<string> filterstrings = null, Library.Utility.IFilter compositefilter = null, Action<IListChangesResults, IEnumerable<Tuple<Library.Interface.ListChangesChangeType, Library.Interface.ListChangesElementType, string>>> callback = null)
        {
            var filter = Library.Utility.JoinedFilterExpression.Join(new Library.Utility.FilterExpression(filterstrings), compositefilter);

            var useLocalDb = !m_options.NoLocalDb && System.IO.File.Exists(m_options.Dbpath);

            baseVersion    = string.IsNullOrEmpty(baseVersion) ? "1" : baseVersion;
            compareVersion = string.IsNullOrEmpty(compareVersion) ? "0" : compareVersion;

            long baseVersionIndex    = -1;
            long compareVersionIndex = -1;

            DateTime baseVersionTime    = new DateTime(0);
            DateTime compareVersionTime = new DateTime(0);

            using (var tmpdb = useLocalDb ? null : new Library.Utility.TempFile())
                using (var db = new Database.LocalListChangesDatabase(useLocalDb ? m_options.Dbpath : (string)tmpdb))
                    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
                        using (var storageKeeper = db.CreateStorageHelper())
                        {
                            m_result.SetDatabase(db);

                            if (useLocalDb)
                            {
                                var dbtimes = db.FilesetTimes.ToList();
                                if (dbtimes.Count < 2)
                                {
                                    throw new UserInformationException(string.Format("Need at least two backups to show differences, database contains {0} backups", dbtimes.Count), "NeedTwoBackupsToStartDiff");
                                }

                                long baseVersionId;
                                long compareVersionId;

                                var times = dbtimes.Zip(Enumerable.Range(0, dbtimes.Count), (a, b) => new Tuple<long, DateTime, long>(b, a.Value, a.Key)).ToList();
                                var bt    = SelectTime(baseVersion, times, out baseVersionIndex, out baseVersionTime, out baseVersionId);
                                times.Remove(bt);
                                SelectTime(compareVersion, times, out compareVersionIndex, out compareVersionTime, out compareVersionId);

                                storageKeeper.AddFromDb(baseVersionId, false, filter);
                                storageKeeper.AddFromDb(compareVersionId, true, filter);
                            }
                            else
                            {
                                Logging.Log.WriteInformationMessage(LOGTAG, "NoLocalDatabase", "No local database, accessing remote store");

                                var parsedlist = (from n in backend.List()
                                                  let p = Volumes.VolumeBase.ParseFilename(n)
                                                  where p != null && p.FileType == RemoteVolumeType.Files
                                                  orderby p.Time descending
                                                  select p).ToArray();

                                var numberedList = parsedlist.Zip(Enumerable.Range(0, parsedlist.Length), (a, b) => new Tuple<long, DateTime, Volumes.IParsedVolume>(b, a.Time, a)).ToList();
                                if (numberedList.Count < 2)
                                {
                                    throw new UserInformationException(string.Format("Need at least two backups to show differences, database contains {0} backups", numberedList.Count), "NeedTwoBackupsToStartDiff");
                                }

                                Volumes.IParsedVolume baseFile;
                                Volumes.IParsedVolume compareFile;

                                var bt = SelectTime(baseVersion, numberedList, out baseVersionIndex, out baseVersionTime, out baseFile);
                                numberedList.Remove(bt);
                                SelectTime(compareVersion, numberedList, out compareVersionIndex, out compareVersionTime, out compareFile);

                                Func <FilelistEntryType, Library.Interface.ListChangesElementType> conv = (x) => {
                                    switch (x)
                                    {
                                    case FilelistEntryType.File:
                                        return(Library.Interface.ListChangesElementType.File);

                                    case FilelistEntryType.Folder:
                                        return(Library.Interface.ListChangesElementType.Folder);

                                    case FilelistEntryType.Symlink:
                                        return(Library.Interface.ListChangesElementType.Symlink);

                                    default:
                                        return((Library.Interface.ListChangesElementType)(-1));
                                    }
                                };

                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    return;
                                }

                                using (var tmpfile = backend.Get(baseFile.File.Name, baseFile.File.Size, null))
                                    using (var rd = new Volumes.FilesetVolumeReader(RestoreHandler.GetCompressionModule(baseFile.File.Name), tmpfile, m_options))
                                        foreach (var f in rd.Files)
                                        {
                                            if (Library.Utility.FilterExpression.Matches(filter, f.Path))
                                            {
                                                storageKeeper.AddElement(f.Path, f.Hash, f.Metahash, f.Size, conv(f.Type), false);
                                            }
                                        }

                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    return;
                                }

                                using (var tmpfile = backend.Get(compareFile.File.Name, compareFile.File.Size, null))
                                    using (var rd = new Volumes.FilesetVolumeReader(RestoreHandler.GetCompressionModule(compareFile.File.Name), tmpfile, m_options))
                                        foreach (var f in rd.Files)
                                        {
                                            if (Library.Utility.FilterExpression.Matches(filter, f.Path))
                                            {
                                                storageKeeper.AddElement(f.Path, f.Hash, f.Metahash, f.Size, conv(f.Type), true);
                                            }
                                        }
                            }

                            var changes = storageKeeper.CreateChangeCountReport();
                            var sizes   = storageKeeper.CreateChangeSizeReport();

                            var lst = (m_options.FullResult || callback != null) ?
                                      (from n in storageKeeper.CreateChangedFileReport()
                                       select n) : null;

                            m_result.SetResult(
                                baseVersionTime, baseVersionIndex, compareVersionTime, compareVersionIndex,
                                changes.AddedFolders, changes.AddedSymlinks, changes.AddedFiles,
                                changes.DeletedFolders, changes.DeletedSymlinks, changes.DeletedFiles,
                                changes.ModifiedFolders, changes.ModifiedSymlinks, changes.ModifiedFiles,
                                sizes.AddedSize, sizes.DeletedSize, sizes.PreviousSize, sizes.CurrentSize,
                                (lst == null || callback == null) ? null : lst.ToArray()
                                );

                            if (callback != null)
                            {
                                callback(m_result, lst);
                            }

                            return;
                        }
        }
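Versions are addressed newest-first: "0" is the latest backup and "1" the one before it, which is why the defaults above compare the two most recent filesets. The Zip(Enumerable.Range(...)) calls pair each fileset time with such an index; a standalone sketch of the same pairing (the dates are illustrative):

        using System;
        using System.Linq;

        static class VersionNumberingSketch
        {
            static void Main()
            {
                // Newest first, mirroring the 'orderby p.Time descending' above.
                var filesetTimes = new[]
                {
                    new DateTime(2024, 3, 1), // version 0 (latest)
                    new DateTime(2024, 2, 1), // version 1
                    new DateTime(2024, 1, 1), // version 2
                };

                var numbered = filesetTimes
                    .Zip(Enumerable.Range(0, filesetTimes.Length),
                         (time, index) => Tuple.Create((long)index, time))
                    .ToList();

                foreach (var n in numbered)
                    Console.WriteLine("version {0}: {1:yyyy-MM-dd}", n.Item1, n.Item2);
            }
        }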
Example #53
        private async Task RunAsync(string[] sources, Library.Utility.IFilter filter)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Begin);

            // New isolated scope for each operation
            using (new IsolatedChannelScope())
                using (m_database = new LocalBackupDatabase(m_options.Dbpath, m_options))
                {
                    m_result.SetDatabase(m_database);
                    m_result.Dryrun = m_options.Dryrun;

                    // Check the database integrity
                    Utility.UpdateOptionsFromDb(m_database, m_options);
                    Utility.VerifyParameters(m_database, m_options);

                    var probe_path = m_database.GetFirstPath();
                    if (probe_path != null && Duplicati.Library.Utility.Utility.GuessDirSeparator(probe_path) != System.IO.Path.DirectorySeparatorChar.ToString())
                    {
                        throw new UserInformationException(string.Format("The backup contains files that belong to another operating system. Proceeding with a backup would cause the database to contain paths from two different operation systems, which is not supported. To proceed without losing remote data, delete all filesets and make sure the --{0} option is set, then run the backup again to re-use the existing data on the remote store.", "no-auto-compact"), "CrossOsDatabaseReuseNotSupported");
                    }

                    if (m_database.PartiallyRecreated)
                    {
                        throw new UserInformationException("The database was only partially recreated. This database may be incomplete and the repair process is not allowed to alter remote files as that could result in data loss.", "DatabaseIsPartiallyRecreated");
                    }

                    if (m_database.RepairInProgress)
                    {
                        throw new UserInformationException("The database was attempted repaired, but the repair did not complete. This database may be incomplete and the backup process cannot continue. You may delete the local database and attempt to repair it again.", "DatabaseRepairInProgress");
                    }

                    // If there is no filter, we set an empty filter to simplify the code
                    // If there is a filter, we make sure that the sources are included
                    m_filter       = filter ?? new Library.Utility.FilterExpression();
                    m_sourceFilter = new Library.Utility.FilterExpression(sources, true);

                    Task parallelScanner = null;
                    Task uploader        = null;
                    try
                    {
                        // Setup runners and instances here
                        using (var db = new Backup.BackupDatabase(m_database, m_options))
                            using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
                                using (var filesetvolume = new FilesetVolumeWriter(m_options, m_database.OperationTimestamp))
                                    using (var stats = new Backup.BackupStatsCollector(m_result))
                                        using (var bk = new Common.BackendHandler(m_options, m_backendurl, db, stats, m_result.TaskReader))
                                            // Keep a reference to these channels to avoid shutdown
                                            using (var uploadtarget = ChannelManager.GetChannel(Backup.Channels.BackendRequest.ForWrite))
                                            {
                                                long filesetid;
                                                var  counterToken = new CancellationTokenSource();
                                                using (var snapshot = GetSnapshot(sources, m_options))
                                                {
                                                    try
                                                    {
                                                        // Start parallel scan, or use the database
                                                        if (m_options.DisableFileScanner)
                                                        {
                                                            var d = m_database.GetLastBackupFileCountAndSize();
                                                            m_result.OperationProgressUpdater.UpdatefileCount(d.Item1, d.Item2, true);
                                                        }
                                                        else
                                                        {
                                                            parallelScanner = Backup.CountFilesHandler.Run(sources, snapshot, m_result, m_options, m_sourceFilter, m_filter, m_result.TaskReader, counterToken.Token);
                                                        }

                                                        // Make sure the database is sane
                                                        await db.VerifyConsistencyAsync(m_options.Blocksize, m_options.BlockhashSize, true);

                                                        // Start the uploader process
                                                        uploader = Backup.BackendUploader.Run(bk, m_options, db, m_result, m_result.TaskReader, stats);

                                                        // If we have an interrupted backup, grab the most recent incomplete fileset
                                                        string lasttempfilelist = null;
                                                        long   lasttempfileid   = -1;
                                                        if (!m_options.DisableSyntheticFilelist)
                                                        {
                                                            var candidates = (await db.GetIncompleteFilesetsAsync()).OrderBy(x => x.Value).ToArray();
                                                            if (candidates.Length > 0)
                                                            {
                                                                lasttempfileid   = candidates.Last().Key;
                                                                lasttempfilelist = m_database.GetRemoteVolumeFromID(lasttempfileid).Name;
                                                            }
                                                        }

                                                        // TODO: Rewrite to using the uploader process, or the BackendHandler interface
                                                        // Do a remote verification, unless disabled
                                                        PreBackupVerify(backend, lasttempfilelist);

                                                        // If the previous backup was interrupted, send a synthetic list
                                                        await Backup.UploadSyntheticFilelist.Run(db, m_options, m_result, m_result.TaskReader, lasttempfilelist, lasttempfileid);

                                                        // Grab the previous backup ID, if any
                                                        var prevfileset = m_database.FilesetTimes.FirstOrDefault();
                                                        if (prevfileset.Value.ToUniversalTime() > m_database.OperationTimestamp.ToUniversalTime())
                                                        {
                                                            throw new Exception(string.Format("The previous backup has time {0}, but this backup has time {1}. Something is wrong with the clock.", prevfileset.Value.ToLocalTime(), m_database.OperationTimestamp.ToLocalTime()));
                                                        }

                                                        var lastfilesetid = prevfileset.Value.Ticks == 0 ? -1 : prevfileset.Key;

                                                        // Rebuild any index files that are missing
                                                        await Backup.RecreateMissingIndexFiles.Run(db, m_options, m_result, m_result.TaskReader);

                                                        // This should be removed as the lookups are no longer used
                                                        m_database.BuildLookupTable(m_options);

                                                        // Prepare the operation by registering the filelist
                                                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_ProcessingFiles);

                                                        var repcnt = 0;
                                                        while (repcnt < 100 && await db.GetRemoteVolumeIDAsync(filesetvolume.RemoteFilename) >= 0)
                                                        {
                                                            filesetvolume.ResetRemoteFilename(m_options, m_database.OperationTimestamp.AddSeconds(repcnt++));
                                                        }

                                                        if (await db.GetRemoteVolumeIDAsync(filesetvolume.RemoteFilename) >= 0)
                                                        {
                                                            throw new Exception("Unable to generate a unique fileset name");
                                                        }

                                                        var filesetvolumeid = await db.RegisterRemoteVolumeAsync(filesetvolume.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary);

                                                        filesetid = await db.CreateFilesetAsync(filesetvolumeid, VolumeBase.ParseFilename(filesetvolume.RemoteFilename).Time);

                                                        // create USN-based scanner if enabled
                                                        var journalService = GetJournalService(sources, snapshot, filter, lastfilesetid);

                                                        // Run the backup operation
                                                        if (await m_result.TaskReader.ProgressAsync)
                                                        {
                                                            await RunMainOperation(sources, snapshot, journalService, db, stats, m_options, m_sourceFilter, m_filter, m_result, m_result.TaskReader, lastfilesetid).ConfigureAwait(false);
                                                        }
                                                    }
                                                    finally
                                                    {
                                                        //If the scanner is still running for some reason, make sure we kill it now
                                                        counterToken.Cancel();
                                                    }
                                                }

                                                // Ensure the database is in a sane state after adding data
                                                using (new Logging.Timer(LOGTAG, "VerifyConsistency", "VerifyConsistency"))
                                                    await db.VerifyConsistencyAsync(m_options.Blocksize, m_options.BlockhashSize, false);

                                                // Send the actual filelist
                                                if (await m_result.TaskReader.ProgressAsync)
                                                {
                                                    await Backup.UploadRealFilelist.Run(m_result, db, m_options, filesetvolume, filesetid, m_result.TaskReader);
                                                }

                                                // Wait for upload completion
                                                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload);
                                                var lastVolumeSize = await FlushBackend(m_result, uploadtarget, uploader).ConfigureAwait(false);

                                                // Make sure we have the database up-to-date
                                                await db.CommitTransactionAsync("CommitAfterUpload", false);

                                                // TODO: Remove this later
                                                m_transaction = m_database.BeginTransaction();

                                                if (await m_result.TaskReader.ProgressAsync)
                                                {
                                                    CompactIfRequired(backend, lastVolumeSize);
                                                }

                                                if (m_options.UploadVerificationFile && await m_result.TaskReader.ProgressAsync)
                                                {
                                                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_VerificationUpload);
                                                    FilelistProcessor.UploadVerificationFile(backend.BackendUrl, m_options, m_result.BackendWriter, m_database, m_transaction);
                                                }

                                                if (m_options.Dryrun)
                                                {
                                                    m_transaction.Rollback();
                                                    m_transaction = null;
                                                }
                                                else
                                                {
                                                    using (new Logging.Timer(LOGTAG, "CommitFinalizingBackup", "CommitFinalizingBackup"))
                                                        m_transaction.Commit();

                                                    m_transaction = null;

                                                    if (m_result.TaskControlRendevouz() != TaskControlState.Stop)
                                                    {
                                                        if (m_options.NoBackendverification)
                                                        {
                                                            UpdateStorageStatsFromDatabase();
                                                        }
                                                        else
                                                        {
                                                            PostBackupVerification();
                                                        }
                                                    }
                                                }

                                                m_database.WriteResults();
                                                m_database.PurgeLogData(m_options.LogRetention);
                                                if (m_options.AutoVacuum)
                                                {
                                                    m_database.Vacuum();
                                                }
                                                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Complete);
                                                return;
                                            }
                    }
                    catch (Exception ex)
                    {
                        var aex = BuildException(ex, uploader, parallelScanner);
                        Logging.Log.WriteErrorMessage(LOGTAG, "FatalError", ex, "Fatal error");
                        if (aex == ex)
                        {
                            throw;
                        }

                        throw aex;
                    }
                    finally
                    {
                        if (parallelScanner != null && !parallelScanner.IsCompleted)
                        {
                            parallelScanner.Wait(500);
                        }

// TODO: Do we want to commit here? Always?
                        if (m_transaction != null)
                        {
                            try { m_transaction.Rollback(); }
                            catch (Exception ex) { Logging.Log.WriteErrorMessage(LOGTAG, "RollbackError", ex, "Rollback error: {0}", ex.Message); }
                        }
                    }
                }
        }
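
A note on the dry-run handling above: all of the finalization writes happen inside a single database transaction that is committed for a real run and rolled back for a dry run, with the finally block rolling back anything left open. A minimal, self-contained sketch of that pattern (the RunWithDryRun helper and its signature are illustrative, not part of Duplicati):

using System;
using System.Data;

static class DryRunPattern
{
    // All mutations happen inside one transaction; a dry run discards them.
    public static void RunWithDryRun(IDbConnection connection, bool dryrun, Action<IDbTransaction> work)
    {
        var tr = connection.BeginTransaction();
        try
        {
            work(tr);

            if (dryrun)
                tr.Rollback();   // dry run: leave the database untouched
            else
                tr.Commit();     // real run: persist the finalized state

            tr = null;
        }
        finally
        {
            // Mirrors the handler's finally block above: anything left open
            // is rolled back, and rollback failures are swallowed.
            if (tr != null)
            {
                try { tr.Rollback(); }
                catch { }
            }
        }
    }
}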
Example #54
0
        public void Run(Library.Utility.IFilter filter)
        {
            if (!System.IO.File.Exists(m_options.Dbpath))
            {
                throw new UserInformationException(string.Format("Database file does not exist: {0}", m_options.Dbpath));
            }

            if (filter != null && !filter.Empty)
            {
                throw new UserInformationException("Filters are not supported for this operation");
            }

            List<Database.RemoteVolumeEntry> missing = null;

            using (var db = new Database.LocalListBrokenFilesDatabase(m_options.Dbpath))
                using (var tr = db.BeginTransaction())
                {
                    if (db.PartiallyRecreated)
                    {
                        throw new UserInformationException("The command does not work on partially recreated databases");
                    }

                    var sets = ListBrokenFilesHandler.GetBrokenFilesetsFromRemote(m_backendurl, m_result, db, tr, m_options, out missing);
                    if (sets == null)
                    {
                        return;
                    }

                    if (sets.Length == 0)
                    {
                        if (missing == null)
                        {
                            m_result.AddMessage("Found no broken filesets");
                        }
                        else if (missing.Count == 0)
                        {
                            m_result.AddMessage("Found no broken filesets and no missing remote files");
                        }
                        else
                        {
                            throw new UserInformationException(string.Format("Found no broken filesets, but {0} missing remote files", sets.Length));
                        }
                    }

                    m_result.AddMessage(string.Format("Found {0} broken filesets with {1} affected files, purging files", sets.Length, sets.Sum(x => x.Item3)));

                    var pgoffset = 0.0f;
                    var pgspan   = 0.95f / sets.Length;

                    var filesets = db.FilesetTimes.ToList();

                    var compare_list = sets.Select(x => new
                    {
                        FilesetID   = x.Item2,
                        Timestamp   = x.Item1,
                        RemoveCount = x.Item3,
                        Version     = filesets.FindIndex(y => y.Key == x.Item2),
                        SetCount    = db.GetFilesetFileCount(x.Item2, tr)
                    }).ToArray();

                    var fully_emptied = compare_list.Where(x => x.RemoveCount == x.SetCount).ToArray();
                    var to_purge      = compare_list.Where(x => x.RemoveCount != x.SetCount).ToArray();

                    if (fully_emptied.Length != 0)
                    {
                        if (fully_emptied.Length == 1)
                        {
                            m_result.AddMessage(string.Format("Removing entire fileset {1} as all {0} file(s) are broken", fully_emptied.First().Timestamp, fully_emptied.First().RemoveCount));
                        }
                        else
                        {
                            m_result.AddMessage(string.Format("Removing {0} filesets where all file(s) are broken: {1}", fully_emptied.Length, string.Join(", ", fully_emptied.Select(x => x.Timestamp.ToLocalTime().ToString()))));
                        }

                        m_result.DeleteResults = new DeleteResults(m_result);
                        using (var rmdb = new Database.LocalDeleteDatabase(db))
                        {
                            var deltr = rmdb.BeginTransaction();
                            try
                            {
                                var opts = new Options(new Dictionary<string, string>(m_options.RawOptions));
                                opts.RawOptions["version"] = string.Join(",", fully_emptied.Select(x => x.Version.ToString()));
                                opts.RawOptions.Remove("time");
                                opts.RawOptions["no-auto-compact"] = "true";

                                new DeleteHandler(m_backendurl, opts, (DeleteResults)m_result.DeleteResults)
                                .DoRun(rmdb, ref deltr, true, false);

                                if (!m_options.Dryrun)
                                {
                                    using (new Logging.Timer("CommitDelete"))
                                        deltr.Commit();

                                    rmdb.WriteResults();
                                }
                                else
                                {
                                    deltr.Rollback();
                                }
                            }
                            finally
                            {
                                if (deltr != null)
                                {
                                    try { deltr.Rollback(); }
                                    catch { }
                                }
                            }
                        }

                        pgoffset += (pgspan * fully_emptied.Length);
                        m_result.OperationProgressUpdater.UpdateProgress(pgoffset);
                    }

                    if (to_purge.Length > 0)
                    {
                        m_result.PurgeResults = new PurgeFilesResults(m_result);

                        foreach (var bs in to_purge)
                        {
                            m_result.AddMessage(string.Format("Purging {0} file(s) from fileset {1}", bs.RemoveCount, bs.Timestamp.ToLocalTime()));
                            var opts = new Options(new Dictionary<string, string>(m_options.RawOptions));

                            using (var pgdb = new Database.LocalPurgeDatabase(db))
                            {
                                // Recompute the version number after we deleted the versions before
                                filesets = pgdb.FilesetTimes.ToList();
                                var thisversion = filesets.FindIndex(y => y.Key == bs.FilesetID);
                                if (thisversion < 0)
                                {
                                    throw new Exception(string.Format("Failed to find match for {0} ({1}) in {2}", bs.FilesetID, bs.Timestamp.ToLocalTime(), string.Join(", ", filesets.Select(x => x.ToString()))));
                                }

                                opts.RawOptions["version"] = thisversion.ToString();
                                opts.RawOptions.Remove("time");
                                opts.RawOptions["no-auto-compact"] = "true";

                                new PurgeFilesHandler(m_backendurl, opts, (PurgeFilesResults)m_result.PurgeResults).Run(pgdb, pgoffset, pgspan, (cmd, filesetid, tablename) =>
                                {
                                    if (filesetid != bs.FilesetID)
                                    {
                                        throw new Exception(string.Format("Unexpected filesetid: {0}, expected {1}", filesetid, bs.FilesetID));
                                    }
                                    db.InsertBrokenFileIDsIntoTable(filesetid, tablename, "FileID", cmd.Transaction);
                                });
                            }

                            pgoffset += pgspan;
                            m_result.OperationProgressUpdater.UpdateProgress(pgoffset);
                        }
                    }

                    if (m_options.Dryrun)
                    {
                        tr.Rollback();
                    }
                    else
                    {
                        tr.Commit();
                    }

                    m_result.OperationProgressUpdater.UpdateProgress(0.95f);

                    if (missing != null && missing.Count > 0)
                    {
                        using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
                        {
                            foreach (var f in missing)
                            {
                                if (m_options.Dryrun)
                                {
                                    m_result.AddDryrunMessage(string.Format("Would delete remote file: {0}, size: {1}", f.Name, Library.Utility.Utility.FormatSizeString(f.Size)));
                                }
                                else
                                {
                                    backend.Delete(f.Name, f.Size);
                                }
                            }
                        }
                    }

                    if (!m_options.Dryrun && db.RepairInProgress)
                    {
                        m_result.AddMessage("Database was previously marked as in-progress, checking if it is valid after purging files");
                        db.VerifyConsistency(null, m_options.Blocksize, m_options.BlockhashSize, true);
                        m_result.AddMessage("Purge completed, and consistency checks completed, marking database as complete");
                        db.RepairInProgress = false;
                    }

                    m_result.OperationProgressUpdater.UpdateProgress(1.0f);
                }
        }
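
Note how the handler above clones RawOptions before delegating to DeleteHandler and PurgeFilesHandler, so the forced "version" and "no-auto-compact" settings never leak back into the caller's options. A reduced sketch of that clone-and-override step, using plain dictionaries (the helper name is invented for illustration):

using System.Collections.Generic;

static class SubOperationOptions
{
    // Copy the raw options and apply per-sub-operation overrides,
    // leaving the caller's dictionary untouched.
    public static Dictionary<string, string> CloneWithOverrides(
        IDictionary<string, string> rawOptions,
        params KeyValuePair<string, string>[] overrides)
    {
        var copy = new Dictionary<string, string>(rawOptions);

        // "time" and "version" are competing fileset selectors; the purge
        // loop above drops the former before forcing the latter.
        copy.Remove("time");

        foreach (var kv in overrides)
            copy[kv.Key] = kv.Value;

        return copy;
    }
}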
Example #55
0
        public void Run(IEnumerable<string> filterstrings = null, Library.Utility.IFilter compositefilter = null)
        {
            var parsedfilter = new Library.Utility.FilterExpression(filterstrings);
            var simpleList   = !(parsedfilter.Type == Library.Utility.FilterType.Simple || m_options.AllVersions);
            var filter       = Library.Utility.JoinedFilterExpression.Join(parsedfilter, compositefilter);

            //Use a speedy local query
            if (!m_options.NoLocalDb && System.IO.File.Exists(m_options.Dbpath))
            {
                using (var db = new Database.LocalListDatabase(m_options.Dbpath))
                {
                    m_result.SetDatabase(db);
                    using (var filesets = db.SelectFileSets(m_options.Time, m_options.Version))
                    {
                        if (parsedfilter.Type != Library.Utility.FilterType.Empty)
                        {
                            if (simpleList || (m_options.ListFolderContents && !m_options.AllVersions))
                            {
                                filesets.TakeFirst();
                            }
                        }

                        IEnumerable<Database.LocalListDatabase.IFileversion> files;
                        if (m_options.ListFolderContents)
                        {
                            files = filesets.SelectFolderContents(filter);
                        }
                        else if (m_options.ListPrefixOnly)
                        {
                            files = filesets.GetLargestPrefix(filter);
                        }
                        else if (parsedfilter.Type == Duplicati.Library.Utility.FilterType.Empty)
                        {
                            files = null;
                        }
                        else
                        {
                            files = filesets.SelectFiles(filter);
                        }

                        if (m_options.ListSetsOnly)
                        {
                            m_result.SetResult(
                                filesets.QuickSets.Select(x => new ListResultFileset(x.Version, x.Time, x.FileCount, x.FileSizes)).ToArray(),
                                null
                                );
                        }
                        else
                        {
                            m_result.SetResult(
                                filesets.Sets.Select(x => new ListResultFileset(x.Version, x.Time, x.FileCount, x.FileSizes)).ToArray(),
                                files == null ? null :
                                (from n in files
                                 select (Duplicati.Library.Interface.IListResultFile)new ListResultFile(n.Path, n.Sizes.ToArray()))
                                .ToArray()
                                );
                        }


                        return;
                    }
                }
            }

            m_result.AddMessage("No local database, accessing remote store");

            //TODO: Add prefix and foldercontents
            if (m_options.ListFolderContents)
            {
                throw new Exception("Listing folder contents is not supported without a local database, consider using the \"repair\" option to rebuild the database.");
            }
            else if (m_options.ListPrefixOnly)
            {
                throw new Exception("Listing prefixes is not supported without a local database, consider using the \"repair\" option to rebuild the database.");
            }

            // Otherwise, grab info from remote location
            using (var tmpdb = new Library.Utility.TempFile())
                using (var db = new Database.LocalDatabase(tmpdb, "List", true))
                    using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
                    {
                        m_result.SetDatabase(db);

                        var filteredList = ParseAndFilterFilesets(backend.List(), m_options);
                        if (filteredList.Count == 0)
                        {
                            throw new Exception("No filesets found on remote target");
                        }

                        var numberSeq = CreateResultSequence(filteredList);
                        if (parsedfilter.Type == Library.Utility.FilterType.Empty)
                        {
                            m_result.SetResult(numberSeq, null);
                            m_result.EncryptedFiles = filteredList.Any(x => !string.IsNullOrWhiteSpace(x.Value.EncryptionModule));
                            return;
                        }

                        var firstEntry = filteredList[0].Value;
                        filteredList.RemoveAt(0);
                        Dictionary<string, List<long>> res;

                        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                        {
                            return;
                        }

                        using (var tmpfile = backend.Get(firstEntry.File.Name, firstEntry.File.Size, null))
                            using (var rd = new Volumes.FilesetVolumeReader(RestoreHandler.GetCompressionModule(firstEntry.File.Name), tmpfile, m_options))
                                if (simpleList)
                                {
                                    m_result.SetResult(
                                        numberSeq.Take(1),
                                        (from n in rd.Files
                                         where Library.Utility.FilterExpression.Matches(filter, n.Path)
                                         orderby n.Path
                                         select new ListResultFile(n.Path, new long[] { n.Size }))
                                        .ToArray()
                                        );

                                    return;
                                }
                                else
                                {
                                    res = rd.Files
                                        .Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path))
                                        .ToDictionary(
                                            x => x.Path,
                                            y => new List<long> { y.Size },
                                            Library.Utility.Utility.ClientFilenameStringComparer);
                                }

                        long flindex = 1;
                        foreach (var flentry in filteredList)
                        {
                            using (var tmpfile = backend.Get(flentry.Value.File.Name, flentry.Value.File == null ? -1 : flentry.Value.File.Size, null))
                                using (var rd = new Volumes.FilesetVolumeReader(flentry.Value.CompressionModule, tmpfile, m_options))
                                {
                                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                    {
                                        return;
                                    }

                                    foreach (var p in from n in rd.Files where Library.Utility.FilterExpression.Matches(filter, n.Path) select n)
                                    {
                                        List<long> lst;
                                        if (!res.TryGetValue(p.Path, out lst))
                                        {
                                            lst         = new List<long>();
                                            res[p.Path] = lst;
                                            for (var i = 0; i < flindex; i++)
                                            {
                                                lst.Add(-1);
                                            }
                                        }

                                        lst.Add(p.Size);
                                    }

                                    foreach (var n in from i in res where i.Value.Count < flindex + 1 select i)
                                    {
                                        n.Value.Add(-1);
                                    }

                                    flindex++;
                                }
                        }

                        m_result.SetResult(
                            numberSeq,
                            from n in res
                            orderby n.Key
                            select (Duplicati.Library.Interface.IListResultFile)new ListResultFile(n.Key, n.Value)
                            );
                    }
        }
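
The remote branch above builds a per-path table of sizes with one column per fileset, inserting -1 wherever a path is absent from a version; the backfill-then-pad ordering is the subtle part. A self-contained sketch of the same algorithm, using tuples in place of the fileset volume reader:

using System;
using System.Collections.Generic;

static class VersionTable
{
    // For each fileset, record one size per path. Paths first seen in a
    // later fileset are backfilled with -1; paths missing from the current
    // fileset are padded with -1, so every list has one entry per fileset.
    public static Dictionary<string, List<long>> BuildSizeTable(
        IEnumerable<IEnumerable<(string Path, long Size)>> filesets)
    {
        var res = new Dictionary<string, List<long>>(StringComparer.Ordinal);
        var index = 0;

        foreach (var files in filesets)
        {
            foreach (var (path, size) in files)
            {
                if (!res.TryGetValue(path, out var lst))
                {
                    lst = new List<long>();
                    res[path] = lst;
                    for (var i = 0; i < index; i++)
                        lst.Add(-1);   // absent from all earlier filesets
                }

                lst.Add(size);
            }

            foreach (var kv in res)
                if (kv.Value.Count < index + 1)
                    kv.Value.Add(-1);  // absent from this fileset

            index++;
        }

        return res;
    }
}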
Example #56
0
        public void DoRun(long samples, LocalTestDatabase db, BackendManager backend)
        {
            var files = db.SelectTestTargets(samples, m_options).ToList();

            m_results.OperationProgressUpdater.UpdatePhase(OperationPhase.Verify_Running);
            m_results.OperationProgressUpdater.UpdateProgress(0);
            var progress = 0L;

            if (m_options.FullRemoteVerification)
            {
                foreach (var vol in new AsyncDownloader(files, backend))
                {
                    try
                    {
                        if (m_results.TaskControlRendevouz() == TaskControlState.Stop)
                        {
                            backend.WaitForComplete(db, null);
                            m_results.EndTime = DateTime.UtcNow;
                            return;
                        }

                        progress++;
                        m_results.OperationProgressUpdater.UpdateProgress((float)progress / files.Count);

                        KeyValuePair<string, IEnumerable<KeyValuePair<TestEntryStatus, string>>> res;
                        using (var tf = vol.TempFile)
                            res = TestVolumeInternals(db, vol, tf, m_options, m_options.FullBlockVerification ? 1.0 : 0.2);
                        m_results.AddResult(res.Key, res.Value);

                        if (!string.IsNullOrWhiteSpace(vol.Hash) && vol.Size > 0)
                        {
                            if (res.Value == null || !res.Value.Any())
                            {
                                var rv = db.GetRemoteVolume(vol.Name, null);

                                if (rv.ID < 0)
                                {
                                    if (string.IsNullOrWhiteSpace(rv.Hash) || rv.Size <= 0)
                                    {
                                        if (m_options.Dryrun)
                                        {
                                            Logging.Log.WriteDryrunMessage(LOGTAG, "CaptureHashAndSize", "Sucessfully captured hash and size for {0}, would update database", vol.Name);
                                        }
                                        else
                                        {
                                            Logging.Log.WriteInformationMessage(LOGTAG, "CaptureHashAndSize", "Sucessfully captured hash and size for {0}, updating database", vol.Name);
                                            db.UpdateRemoteVolume(vol.Name, RemoteVolumeState.Verified, vol.Size, vol.Hash);
                                        }
                                    }
                                }
                            }
                        }

                        db.UpdateVerificationCount(vol.Name);
                    }
                    catch (Exception ex)
                    {
                        m_results.AddResult(vol.Name, new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>[] { new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>(Duplicati.Library.Interface.TestEntryStatus.Error, ex.Message) });
                        Logging.Log.WriteErrorMessage(LOGTAG, "RemoteFileProcessingFailed", ex, "Failed to process file {0}", vol.Name);
                        if (ex is System.Threading.ThreadAbortException)
                        {
                            m_results.EndTime = DateTime.UtcNow;
                            throw;
                        }
                    }
                }
            }
            else
            {
                foreach (var f in files)
                {
                    try
                    {
                        if (m_results.TaskControlRendevouz() == TaskControlState.Stop)
                        {
                            m_results.EndTime = DateTime.UtcNow;
                            return;
                        }

                        progress++;
                        m_results.OperationProgressUpdater.UpdateProgress((float)progress / files.Count);

                        if (f.Size <= 0 || string.IsNullOrWhiteSpace(f.Hash))
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "MissingRemoteHash", "No hash or size recorded for {0}, performing full verification", f.Name);
                            KeyValuePair<string, IEnumerable<KeyValuePair<TestEntryStatus, string>>> res;
                            string hash;
                            long   size;

                            using (var tf = backend.GetWithInfo(f.Name, out size, out hash))
                                res = TestVolumeInternals(db, f, tf, m_options, 1);
                            m_results.AddResult(res.Key, res.Value);

                            if (!string.IsNullOrWhiteSpace(hash) && size > 0)
                            {
                                if (res.Value == null || !res.Value.Any())
                                {
                                    if (m_options.Dryrun)
                                    {
                                        Logging.Log.WriteDryrunMessage(LOGTAG, "CapturedHashAndSize", "Sucessfully captured hash and size for {0}, would update database", f.Name);
                                    }
                                    else
                                    {
                                        Logging.Log.WriteInformationMessage(LOGTAG, "CapturedHashAndSize", "Sucessfully captured hash and size for {0}, updating database", f.Name);
                                        db.UpdateRemoteVolume(f.Name, RemoteVolumeState.Verified, size, hash);
                                    }
                                }
                            }
                        }
                        else
                        {
                            backend.GetForTesting(f.Name, f.Size, f.Hash);
                        }

                        db.UpdateVerificationCount(f.Name);
                        m_results.AddResult(f.Name, new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>[0]);
                    }
                    catch (Exception ex)
                    {
                        m_results.AddResult(f.Name, new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>[] { new KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>(Duplicati.Library.Interface.TestEntryStatus.Error, ex.Message) });
                        Logging.Log.WriteErrorMessage(LOGTAG, "FailedToProcessFile", ex, "Failed to process file {0}", f.Name);
                        if (ex is System.Threading.ThreadAbortException)
                        {
                            m_results.EndTime = DateTime.UtcNow;
                            throw;
                        }
                    }
                }
            }

            m_results.EndTime = DateTime.UtcNow;
        }
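
Where no hash or size is recorded, the test handler above downloads the volume via backend.GetWithInfo and captures both values before updating the database. A minimal sketch of computing those two values from a downloaded temporary file, assuming a SHA-256 file hash stored base64-encoded (Duplicati's defaults; the helper itself is illustrative):

using System;
using System.IO;
using System.Security.Cryptography;

static class VolumeInfo
{
    // Compute the size and base64-encoded hash of a downloaded file,
    // the same two values GetWithInfo reports for a remote volume.
    public static (long Size, string Hash) HashAndSize(string path)
    {
        using (var sha = SHA256.Create())
        using (var fs = File.OpenRead(path))
        {
            var hash = Convert.ToBase64String(sha.ComputeHash(fs));
            return (fs.Length, hash);
        }
    }
}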
Example #57
0
        public void RunRepairRemote()
        {
            if (!System.IO.File.Exists(m_options.Dbpath))
            {
                throw new UserInformationException(string.Format("Database file does not exist: {0}", m_options.Dbpath), "RepairDatabaseFileDoesNotExist");
            }

            m_result.OperationProgressUpdater.UpdateProgress(0);

            using (var db = new LocalRepairDatabase(m_options.Dbpath))
                using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
                {
                    m_result.SetDatabase(db);
                    Utility.UpdateOptionsFromDb(db, m_options);
                    Utility.VerifyParameters(db, m_options);

                    if (db.PartiallyRecreated)
                    {
                        throw new UserInformationException("The database was only partially recreated. This database may be incomplete and the repair process is not allowed to alter remote files as that could result in data loss.", "DatabaseIsPartiallyRecreated");
                    }

                    if (db.RepairInProgress)
                    {
                        throw new UserInformationException("The database was attempted repaired, but the repair did not complete. This database may be incomplete and the repair process is not allowed to alter remote files as that could result in data loss.", "DatabaseIsInRepairState");
                    }

                    var tp          = FilelistProcessor.RemoteListAnalysis(backend, m_options, db, m_result.BackendWriter, null);
                    var buffer      = new byte[m_options.Blocksize];
                    var blockhasher = Library.Utility.HashAlgorithmHelper.Create(m_options.BlockHashAlgorithm);

                    if (blockhasher == null)
                    {
                        throw new UserInformationException(Strings.Common.InvalidHashAlgorithm(m_options.BlockHashAlgorithm), "BlockHashAlgorithmNotSupported");
                    }
                    if (!blockhasher.CanReuseTransform)
                    {
                        throw new UserInformationException(Strings.Common.InvalidCryptoSystem(m_options.BlockHashAlgorithm), "BlockHashAlgorithmNotSupported");
                    }

                    var hashsize = blockhasher.HashSize / 8;

                    var progress      = 0;
                    var targetProgress = tp.ExtraVolumes.Count() + tp.MissingVolumes.Count() + tp.VerificationRequiredVolumes.Count();

                    if (m_options.Dryrun)
                    {
                        if (tp.ParsedVolumes.Count() == 0 && tp.OtherVolumes.Count() > 0)
                        {
                            if (tp.BackupPrefixes.Length == 1)
                            {
                                throw new UserInformationException(string.Format("Found no backup files with prefix {0}, but files with prefix {1}, did you forget to set the backup prefix?", m_options.Prefix, tp.BackupPrefixes[0]), "RemoteFolderEmptyWithPrefix");
                            }
                            else
                            {
                                throw new UserInformationException(string.Format("Found no backup files with prefix {0}, but files with prefixes {1}, did you forget to set the backup prefix?", m_options.Prefix, string.Join(", ", tp.BackupPrefixes)), "RemoteFolderEmptyWithPrefix");
                            }
                        }
                        else if (tp.ParsedVolumes.Count() == 0 && tp.ExtraVolumes.Count() > 0)
                        {
                            throw new UserInformationException(string.Format("No files were missing, but {0} remote files were, found, did you mean to run recreate-database?", tp.ExtraVolumes.Count()), "NoRemoteFilesMissing");
                        }
                    }

                    if (tp.ExtraVolumes.Count() > 0 || tp.MissingVolumes.Count() > 0 || tp.VerificationRequiredVolumes.Count() > 0)
                    {
                        if (tp.VerificationRequiredVolumes.Any())
                        {
                            using (var testdb = new LocalTestDatabase(db))
                            {
                                foreach (var n in tp.VerificationRequiredVolumes)
                                {
                                    try
                                    {
                                        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                        {
                                            backend.WaitForComplete(db, null);
                                            return;
                                        }

                                        progress++;
                                        m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);

                                        long   size;
                                        string hash;
                                        KeyValuePair<string, IEnumerable<KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>>> res;

                                        using (var tf = backend.GetWithInfo(n.Name, out size, out hash))
                                            res = TestHandler.TestVolumeInternals(testdb, n, tf, m_options, 1);

                                        if (res.Value.Any())
                                        {
                                            throw new Exception(string.Format("Remote verification failure: {0}", res.Value.First()));
                                        }

                                        if (!m_options.Dryrun)
                                        {
                                            Logging.Log.WriteInformationMessage(LOGTAG, "CapturedRemoteFileHash", "Sucessfully captured hash for {0}, updating database", n.Name);
                                            db.UpdateRemoteVolume(n.Name, RemoteVolumeState.Verified, size, hash);
                                        }
                                    }
                                    catch (Exception ex)
                                    {
                                        Logging.Log.WriteErrorMessage(LOGTAG, "RemoteFileVerificationError", ex, "Failed to perform verification for file: {0}, please run verify; message: {1}", n.Name, ex.Message);
                                        if (ex is System.Threading.ThreadAbortException)
                                        {
                                            throw;
                                        }
                                    }
                                }
                            }
                        }

                        // TODO: It is actually possible to use the extra files if we parse them
                        foreach (var n in tp.ExtraVolumes)
                        {
                            try
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(db, null);
                                    return;
                                }

                                progress++;
                                m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);

                                // If this is a new index file, we can accept it if it matches our local data
                                // This makes it possible to augment the remote store with new index data
                                if (n.FileType == RemoteVolumeType.Index && m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
                                {
                                    try
                                    {
                                        string hash;
                                        long   size;
                                        using (var tf = backend.GetWithInfo(n.File.Name, out size, out hash))
                                            using (var ifr = new IndexVolumeReader(n.CompressionModule, tf, m_options, m_options.BlockhashSize))
                                            {
                                                foreach (var rv in ifr.Volumes)
                                                {
                                                    var entry = db.GetRemoteVolume(rv.Filename);
                                                    if (entry.ID < 0)
                                                    {
                                                        throw new Exception(string.Format("Unknown remote file {0} detected", rv.Filename));
                                                    }

                                                    if (!new[] { RemoteVolumeState.Uploading, RemoteVolumeState.Uploaded, RemoteVolumeState.Verified }.Contains(entry.State))
                                                    {
                                                        throw new Exception(string.Format("Volume {0} has local state {1}", rv.Filename, entry.State));
                                                    }

                                                    if (entry.Hash != rv.Hash || entry.Size != rv.Length || !new[] { RemoteVolumeState.Uploading, RemoteVolumeState.Uploaded, RemoteVolumeState.Verified }.Contains(entry.State))
                                                    {
                                                        throw new Exception(string.Format("Volume {0} hash/size mismatch ({1} - {2}) vs ({3} - {4})", rv.Filename, entry.Hash, entry.Size, rv.Hash, rv.Length));
                                                    }

                                                    db.CheckAllBlocksAreInVolume(rv.Filename, rv.Blocks);
                                                }

                                                var blocksize = m_options.Blocksize;
                                                foreach (var ixb in ifr.BlockLists)
                                                {
                                                    db.CheckBlocklistCorrect(ixb.Hash, ixb.Length, ixb.Blocklist, blocksize, hashsize);
                                                }

                                                var selfid = db.GetRemoteVolumeID(n.File.Name);
                                                foreach (var rv in ifr.Volumes)
                                                {
                                                    db.AddIndexBlockLink(selfid, db.GetRemoteVolumeID(rv.Filename), null);
                                                }
                                            }

                                        // All checks fine, we accept the new index file
                                        Logging.Log.WriteInformationMessage(LOGTAG, "AcceptNewIndexFile", "Accepting new index file {0}", n.File.Name);
                                        db.RegisterRemoteVolume(n.File.Name, RemoteVolumeType.Index, size, RemoteVolumeState.Uploading);
                                        db.UpdateRemoteVolume(n.File.Name, RemoteVolumeState.Verified, size, hash);
                                        continue;
                                    }
                                    catch (Exception rex)
                                    {
                                        Logging.Log.WriteErrorMessage(LOGTAG, "FailedNewIndexFile", rex, "Failed to accept new index file: {0}, message: {1}", n.File.Name, rex.Message);
                                    }
                                }

                                if (!m_options.Dryrun)
                                {
                                    db.RegisterRemoteVolume(n.File.Name, n.FileType, n.File.Size, RemoteVolumeState.Deleting);
                                    backend.Delete(n.File.Name, n.File.Size);
                                }
                                else
                                {
                                    Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteFile", "would delete file {0}", n.File.Name);
                                }
                            }
                            catch (Exception ex)
                            {
                                Logging.Log.WriteErrorMessage(LOGTAG, "FailedExtraFileCleanup", ex, "Failed to perform cleanup for extra file: {0}, message: {1}", n.File.Name, ex.Message);
                                if (ex is System.Threading.ThreadAbortException)
                                {
                                    throw;
                                }
                            }
                        }

                        if (!m_options.RebuildMissingDblockFiles)
                        {
                            var missingDblocks = tp.MissingVolumes.Where(x => x.Type == RemoteVolumeType.Blocks).ToArray();
                            if (missingDblocks.Length > 0)
                            {
                                throw new UserInformationException($"The backup storage destination is missing data files. You can either enable `--rebuild-missing-dblock-files` or run the purge command to remove these files. The following files are missing: {string.Join(", ", missingDblocks.Select(x => x.Name))}", "MissingDblockFiles");
                            }
                        }

                        foreach (var n in tp.MissingVolumes)
                        {
                            IDisposable newEntry = null;

                            try
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(db, null);
                                    return;
                                }

                                progress++;
                                m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);

                                if (n.Type == RemoteVolumeType.Files)
                                {
                                    var filesetId = db.GetFilesetIdFromRemotename(n.Name);
                                    var w         = new FilesetVolumeWriter(m_options, DateTime.UtcNow);
                                    newEntry = w;
                                    w.SetRemoteFilename(n.Name);

                                    db.WriteFileset(w, filesetId, null);

                                    w.Close();
                                    if (m_options.Dryrun)
                                    {
                                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldReUploadFileset", "would re-upload fileset {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size));
                                    }
                                    else
                                    {
                                        db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                                        backend.Put(w);
                                    }
                                }
                                else if (n.Type == RemoteVolumeType.Index)
                                {
                                    var w = new IndexVolumeWriter(m_options);
                                    newEntry = w;
                                    w.SetRemoteFilename(n.Name);

                                    var h = Library.Utility.HashAlgorithmHelper.Create(m_options.BlockHashAlgorithm);

                                    foreach (var blockvolume in db.GetBlockVolumesFromIndexName(n.Name))
                                    {
                                        w.StartVolume(blockvolume.Name);
                                        var volumeid = db.GetRemoteVolumeID(blockvolume.Name);

                                        foreach (var b in db.GetBlocks(volumeid))
                                        {
                                            w.AddBlock(b.Hash, b.Size);
                                        }

                                        w.FinishVolume(blockvolume.Hash, blockvolume.Size);

                                        if (m_options.IndexfilePolicy == Options.IndexFileStrategy.Full)
                                        {
                                            foreach (var b in db.GetBlocklists(volumeid, m_options.Blocksize, hashsize))
                                            {
                                                var bh = Convert.ToBase64String(h.ComputeHash(b.Item2, 0, b.Item3));
                                                if (bh != b.Item1)
                                                {
                                                    throw new Exception(string.Format("Internal consistency check failed, generated index block has wrong hash, {0} vs {1}", bh, b.Item1));
                                                }

                                                w.WriteBlocklist(b.Item1, b.Item2, 0, b.Item3);
                                            }
                                        }
                                    }

                                    w.Close();

                                    if (m_options.Dryrun)
                                    {
                                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldReUploadIndexFile", "would re-upload index file {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size));
                                    }
                                    else
                                    {
                                        db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                                        backend.Put(w);
                                    }
                                }
                                else if (n.Type == RemoteVolumeType.Blocks)
                                {
                                    var w = new BlockVolumeWriter(m_options);
                                    newEntry = w;
                                    w.SetRemoteFilename(n.Name);

                                    using (var mbl = db.CreateBlockList(n.Name))
                                    {
                                        //First we grab all known blocks from local files
                                        foreach (var block in mbl.GetSourceFilesWithBlocks(m_options.Blocksize))
                                        {
                                            var hash = block.Hash;
                                            var size = (int)block.Size;

                                            foreach (var source in block.Sources)
                                            {
                                                var file   = source.File;
                                                var offset = source.Offset;

                                                try
                                                {
                                                    if (System.IO.File.Exists(file))
                                                    {
                                                        using (var f = System.IO.File.OpenRead(file))
                                                        {
                                                            f.Position = offset;
                                                            if (size == Library.Utility.Utility.ForceStreamRead(f, buffer, size))
                                                            {
                                                                var newhash = Convert.ToBase64String(blockhasher.ComputeHash(buffer, 0, size));
                                                                if (newhash == hash)
                                                                {
                                                                    if (mbl.SetBlockRestored(hash, size))
                                                                    {
                                                                        w.AddBlock(hash, buffer, 0, size, Duplicati.Library.Interface.CompressionHint.Default);
                                                                    }
                                                                    break;
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                                catch (Exception ex)
                                                {
                                                    Logging.Log.WriteErrorMessage(LOGTAG, "FileAccessError", ex, "Failed to access file: {0}", file);
                                                }
                                            }
                                        }

                                        //Then we grab all remote volumes that have the missing blocks
                                        foreach (var vol in new AsyncDownloader(mbl.GetMissingBlockSources().ToList(), backend))
                                        {
                                            try
                                            {
                                                using (var tmpfile = vol.TempFile)
                                                    using (var f = new BlockVolumeReader(RestoreHandler.GetCompressionModule(vol.Name), tmpfile, m_options))
                                                        foreach (var b in f.Blocks)
                                                        {
                                                            if (mbl.SetBlockRestored(b.Key, b.Value))
                                                            {
                                                                if (f.ReadBlock(b.Key, buffer) == b.Value)
                                                                {
                                                                    w.AddBlock(b.Key, buffer, 0, (int)b.Value, Duplicati.Library.Interface.CompressionHint.Default);
                                                                }
                                                            }
                                                        }
                                            }
                                            catch (Exception ex)
                                            {
                                                Logging.Log.WriteErrorMessage(LOGTAG, "RemoteFileAccessError", ex, "Failed to access remote file: {0}", vol.Name);
                                            }
                                        }

                                        // If we managed to recover all blocks, NICE!
                                        var missingBlocks = mbl.GetMissingBlocks().Count();
                                        if (missingBlocks > 0)
                                        {
                                            Logging.Log.WriteInformationMessage(LOGTAG, "RepairMissingBlocks", "Repair cannot acquire {0} required blocks for volume {1}, which are required by the following filesets: ", missingBlocks, n.Name);
                                            foreach (var f in mbl.GetFilesetsUsingMissingBlocks())
                                            {
                                                Logging.Log.WriteInformationMessage(LOGTAG, "AffectedFilesetName", f.Name);
                                            }

                                            var recoverymsg = string.Format("If you want to continue working with the database, you can use the \"{0}\" and \"{1}\" commands to purge the missing data from the database and the remote storage.", "list-broken-files", "purge-broken-files");

                                            if (!m_options.Dryrun)
                                            {
                                                Logging.Log.WriteInformationMessage(LOGTAG, "RecoverySuggestion", "This may be fixed by deleting the filesets and running repair again");

                                                throw new UserInformationException(string.Format("Repair not possible, missing {0} blocks.\n" + recoverymsg, missingBlocks), "RepairIsNotPossible");
                                            }
                                            else
                                            {
                                                Logging.Log.WriteInformationMessage(LOGTAG, "RecoverySuggestion", recoverymsg);
                                            }
                                        }
                                        else
                                        {
                                            if (m_options.Dryrun)
                                            {
                                                Logging.Log.WriteDryrunMessage(LOGTAG, "WouldReUploadBlockFile", "would re-upload block file {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size));
                                            }
                                            else
                                            {
                                                db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                                                backend.Put(w);
                                            }
                                        }
                                    }
                                }
                            }
                            catch (Exception ex)
                            {
                                if (newEntry != null)
                                {
                                    try { newEntry.Dispose(); }
                                    catch { }
                                    finally { newEntry = null; }
                                }

                                Logging.Log.WriteErrorMessage(LOGTAG, "CleanupMissingFileError", ex, "Failed to perform cleanup for missing file: {0}, message: {1}", n.Name, ex.Message);

                                if (ex is System.Threading.ThreadAbortException)
                                {
                                    throw;
                                }
                            }
                        }
                    }
                    else
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "DatabaseIsSynchronized", "Destination and database are synchronized, not making any changes");
                    }

                    m_result.OperationProgressUpdater.UpdateProgress(1);
                    backend.WaitForComplete(db, null);
                    db.WriteResults();
                }
        }
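
When repair fails with missing blocks, the recovery message above points at the two purge commands. A minimal invocation sketch (the command names are taken from the message string in the code; the exact argument form and the --dbpath option are assumptions about the Duplicati CLI setup):

    Duplicati.CommandLine.exe list-broken-files <storage-url> --dbpath=<path-to-local-db>
    Duplicati.CommandLine.exe purge-broken-files <storage-url> --dbpath=<path-to-local-db>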
Example #58
        /// <summary>
        /// Helper method that verifies uploaded volumes and updates their state in the database.
        /// Throws an error if there are issues with the remote storage
        /// </summary>
        /// <param name="backend">The backend instance to use</param>
        /// <param name="options">The options used</param>
        /// <param name="database">The database to compare with</param>
        /// <param name="protectedfile">A filename that should be excempted for deletion</param>
        public static RemoteAnalysisResult RemoteListAnalysis(BackendManager backend, Options options, LocalDatabase database, IBackendWriter log, string protectedfile)
        {
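            // List the remote files, then partition them by parsed filename:
            // this backup's volumes, foreign-prefix volumes, and unparsable names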
            var rawlist = backend.List();
            var lookup  = new Dictionary<string, Volumes.IParsedVolume>();

            protectedfile = protectedfile ?? string.Empty;

            var remotelist = (from n in rawlist
                              let p = Volumes.VolumeBase.ParseFilename(n)
                              where p != null && p.Prefix == options.Prefix
                              select p).ToList();

            var otherlist = (from n in rawlist
                             let p = Volumes.VolumeBase.ParseFilename(n)
                             where p != null && p.Prefix != options.Prefix
                             select p).ToList();

            var unknownlist = (from n in rawlist
                               let p = Volumes.VolumeBase.ParseFilename(n)
                               where p == null
                               select n).ToList();

            var filesets = (from n in remotelist
                            where n.FileType == RemoteVolumeType.Files
                            orderby n.Time descending
                            select n).ToList();

            log.KnownFileCount   = remotelist.Count;
            log.KnownFileSize    = remotelist.Select(x => Math.Max(0, x.File.Size)).Sum();
            log.UnknownFileCount = unknownlist.Count;
            log.UnknownFileSize  = unknownlist.Select(x => Math.Max(0, x.Size)).Sum();
            log.BackupListCount  = filesets.Count;
            log.LastBackupDate   = filesets.Count == 0 ? new DateTime(0) : filesets[0].Time.ToLocalTime();

            // TODO: We should query through the backendmanager
            using (var bk = DynamicLoader.BackendLoader.GetBackend(backend.BackendUrl, options.RawOptions))
                if (bk is Library.Interface.IQuotaEnabledBackend)
                {
                    Library.Interface.IQuotaInfo quota = ((Library.Interface.IQuotaEnabledBackend)bk).Quota;
                    if (quota != null)
                    {
                        log.TotalQuotaSpace = quota.TotalQuotaSpace;
                        log.FreeQuotaSpace  = quota.FreeQuotaSpace;
                    }
                }

            log.AssignedQuotaSpace = options.QuotaSize;

            foreach (var s in remotelist)
            {
                lookup[s.File.Name] = s;
            }

            var missing     = new List<RemoteVolumeEntry>();
            var missingHash = new List<Tuple<long, RemoteVolumeEntry>>();
            var cleanupRemovedRemoteVolumes = new HashSet<string>();

            foreach (var e in database.DuplicateRemoteVolumes())
            {
                if (e.Value == RemoteVolumeState.Uploading || e.Value == RemoteVolumeState.Temporary)
                {
                    database.UnlinkRemoteVolume(e.Key, e.Value);
                }
                else
                {
                    throw new Exception(string.Format("The remote volume {0} appears in the database with state {1} and a deleted state, cannot continue", e.Key, e.Value.ToString()));
                }
            }

            var locallist = database.GetRemoteVolumes();

            foreach (var i in locallist)
            {
                Volumes.IParsedVolume r;
                var remoteFound = lookup.TryGetValue(i.Name, out r);
                var correctSize = remoteFound && i.Size >= 0 && (i.Size == r.File.Size || r.File.Size < 0);

                lookup.Remove(i.Name);

                switch (i.State)
                {
                case RemoteVolumeState.Deleted:
                    if (remoteFound)
                    {
                        log.AddMessage(string.Format("ignoring remote file listed as {0}: {1}", i.State, i.Name));
                    }

                    break;

                case RemoteVolumeState.Temporary:
                case RemoteVolumeState.Deleting:
                    if (remoteFound)
                    {
                        log.AddMessage(string.Format("removing remote file listed as {0}: {1}", i.State, i.Name));
                        backend.Delete(i.Name, i.Size, true);
                    }
                    else
                    {
                        if (i.DeleteGracePeriod > DateTime.UtcNow)
                        {
                            log.AddMessage(string.Format("keeping delete request for {0} until {1}", i.Name, i.DeleteGracePeriod.ToLocalTime()));
                        }
                        else
                        {
                            if (string.Equals(i.Name, protectedfile) && i.State == RemoteVolumeState.Temporary)
                            {
                                log.AddMessage(string.Format("keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name));
                            }
                            else
                            {
                                log.AddMessage(string.Format("removing file listed as {0}: {1}", i.State, i.Name));
                                cleanupRemovedRemoteVolumes.Add(i.Name);
                            }
                        }
                    }
                    break;

                case RemoteVolumeState.Uploading:
                    if (remoteFound && correctSize && r.File.Size >= 0)
                    {
                        log.AddMessage(string.Format("promoting uploaded complete file from {0} to {2}: {1}", i.State, i.Name, RemoteVolumeState.Uploaded));
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Uploaded, i.Size, i.Hash);
                    }
                    else if (!remoteFound)
                    {
                        if (string.Equals(i.Name, protectedfile))
                        {
                            log.AddMessage(string.Format("keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name));
                            database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Temporary, i.Size, i.Hash, false, new TimeSpan(0), null);
                        }
                        else
                        {
                            log.AddMessage(string.Format("scheduling missing file for deletion, currently listed as {0}: {1}", i.State, i.Name));
                            cleanupRemovedRemoteVolumes.Add(i.Name);
                            database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Deleting, i.Size, i.Hash, false, TimeSpan.FromHours(2), null);
                        }
                    }
                    else
                    {
                        if (string.Equals(i.Name, protectedfile))
                        {
                            log.AddMessage(string.Format("keeping protected incomplete remote file listed as {0}: {1}", i.State, i.Name));
                        }
                        else
                        {
                            log.AddMessage(string.Format("removing incomplete remote file listed as {0}: {1}", i.State, i.Name));
                            backend.Delete(i.Name, i.Size, true);
                        }
                    }
                    break;

                case RemoteVolumeState.Uploaded:
                    if (!remoteFound)
                    {
                        missing.Add(i);
                    }
                    else if (correctSize)
                    {
                        database.UpdateRemoteVolume(i.Name, RemoteVolumeState.Verified, i.Size, i.Hash);
                    }
                    else
                    {
                        missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));
                    }

                    break;

                case RemoteVolumeState.Verified:
                    if (!remoteFound)
                    {
                        missing.Add(i);
                    }
                    else if (!correctSize)
                    {
                        missingHash.Add(new Tuple<long, RemoteVolumeEntry>(r.File.Size, i));
                    }

                    break;

                default:
                    log.AddWarning(string.Format("unknown state for remote file listed as {0}: {1}", i.State, i.Name), null);
                    break;
                }

                backend.FlushDbMessages();
            }

            // Clean up deleted volumes in the database in one batch
            database.RemoveRemoteVolumes(cleanupRemovedRemoteVolumes, null);

            foreach (var i in missingHash)
            {
                log.AddWarning(string.Format("remote file {1} is listed as {0} with size {2} but should be {3}, please verify the sha256 hash \"{4}\"", i.Item2.State, i.Item2.Name, i.Item1, i.Item2.Size, i.Item2.Hash), null);
            }

            return new RemoteAnalysisResult()
            {
                ParsedVolumes = remotelist,
                OtherVolumes = otherlist,
                ExtraVolumes = lookup.Values,
                MissingVolumes = missing,
                VerificationRequiredVolumes = missingHash.Select(x => x.Item2)
            };
        }
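
For orientation, a minimal sketch of how the analysis result might be consumed. The setup of backend, options, database and log is assumed to exist as in the other examples, and the failure handling shown is illustrative, not the library's own:

    // Hypothetical caller: reconcile the remote listing, then decide whether repair is needed.
    // Requires System.Linq for Any().
    var tp = RemoteListAnalysis(backend, options, database, log, protectedfile: null);

    if (tp.MissingVolumes.Any() || tp.VerificationRequiredVolumes.Any())
        throw new UserInformationException("Backend and database are out of sync, run repair", "RepairRequired");

    foreach (var extra in tp.ExtraVolumes)
        log.AddMessage(string.Format("extra remote file not in database: {0}", extra.File.Name));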
Example #59
        public void RunRepairRemote()
        {
            if (!System.IO.File.Exists(m_options.Dbpath))
            {
                throw new Exception(string.Format("Database file does not exist: {0}", m_options.Dbpath));
            }

            m_result.OperationProgressUpdater.UpdateProgress(0);

            using (var db = new LocalRepairDatabase(m_options.Dbpath))
                using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
                {
                    m_result.SetDatabase(db);
                    Utility.VerifyParameters(db, m_options);

                    var tp          = FilelistProcessor.RemoteListAnalysis(backend, m_options, db, m_result.BackendWriter, null);
                    var buffer      = new byte[m_options.Blocksize];
                    var blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);

                    if (blockhasher == null)
                    {
                        throw new Exception(Strings.Foresthash.InvalidHashAlgorithm(m_options.BlockHashAlgorithm));
                    }
                    if (!blockhasher.CanReuseTransform)
                    {
                        throw new Exception(Strings.Foresthash.InvalidCryptoSystem(m_options.BlockHashAlgorithm));
                    }

                    // Only safe to read HashSize after the null check above
                    var hashsize = blockhasher.HashSize / 8;

                    var progress       = 0;
                    var targetProgress = tp.ExtraVolumes.Count() + tp.MissingVolumes.Count() + tp.VerificationRequiredVolumes.Count();

                    if (m_options.Dryrun)
                    {
                        if (tp.ParsedVolumes.Count() == 0 && tp.OtherVolumes.Count() > 0)
                        {
                            if (tp.BackupPrefixes.Length == 1)
                            {
                                throw new Exception(string.Format("Found no backup files with prefix {0}, but files with prefix {1}, did you forget to set the backup-prefix?", m_options.Prefix, tp.BackupPrefixes[0]));
                            }
                            else
                            {
                                throw new Exception(string.Format("Found no backup files with prefix {0}, but files with prefixes {1}, did you forget to set the backup-prefix?", m_options.Prefix, string.Join(", ", tp.BackupPrefixes)));
                            }
                        }
                        else if (tp.ParsedVolumes.Count() == 0 && tp.ExtraVolumes.Count() > 0)
                        {
                            throw new Exception(string.Format("No files were missing, but {0} remote files were, found, did you mean to run recreate-database?", tp.ExtraVolumes.Count()));
                        }
                    }

                    if (tp.ExtraVolumes.Count() > 0 || tp.MissingVolumes.Count() > 0 || tp.VerificationRequiredVolumes.Count() > 0)
                    {
                        if (tp.VerificationRequiredVolumes.Any())
                        {
                            using (var testdb = new LocalTestDatabase(db))
                            {
                                foreach (var n in tp.VerificationRequiredVolumes)
                                {
                                    try
                                    {
                                        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                        {
                                            backend.WaitForComplete(db, null);
                                            return;
                                        }

                                        progress++;
                                        m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);

                                        long   size;
                                        string hash;
                                        KeyValuePair<string, IEnumerable<KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>>> res;

                                        using (var tf = backend.GetWithInfo(n.Name, out size, out hash))
                                            res = TestHandler.TestVolumeInternals(testdb, n, tf, m_options, m_result, 1);

                                        if (res.Value.Any())
                                        {
                                            throw new Exception(string.Format("Remote verification failure: {0}", res.Value.First()));
                                        }

                                        if (!m_options.Dryrun)
                                        {
                                            m_result.AddMessage(string.Format("Sucessfully captured hash for {0}, updating database", n.Name));
                                            db.UpdateRemoteVolume(n.Name, RemoteVolumeState.Verified, size, hash);
                                        }
                                    }
                                    catch (Exception ex)
                                    {
                                        m_result.AddError(string.Format("Failed to perform verification for file: {0}, please run verify; message: {1}", n.Name, ex.Message), ex);
                                        if (ex is System.Threading.ThreadAbortException)
                                        {
                                            throw;
                                        }
                                    }
                                }
                            }
                        }

                        // TODO: It is actually possible to use the extra files if we parse them
                        foreach (var n in tp.ExtraVolumes)
                        {
                            try
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(db, null);
                                    return;
                                }

                                progress++;
                                m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);

                                if (!m_options.Dryrun)
                                {
                                    db.RegisterRemoteVolume(n.File.Name, n.FileType, RemoteVolumeState.Deleting);
                                    backend.Delete(n.File.Name, n.File.Size);
                                }
                                else
                                {
                                    m_result.AddDryrunMessage(string.Format("would delete file {0}", n.File.Name));
                                }
                            }
                            catch (Exception ex)
                            {
                                m_result.AddError(string.Format("Failed to perform cleanup for extra file: {0}, message: {1}", n.File.Name, ex.Message), ex);
                                if (ex is System.Threading.ThreadAbortException)
                                {
                                    throw;
                                }
                            }
                        }

                        foreach (var n in tp.MissingVolumes)
                        {
                            IDisposable newEntry = null;

                            try
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(db, null);
                                    return;
                                }

                                progress++;
                                m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);

                                if (n.Type == RemoteVolumeType.Files)
                                {
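                                    // Recreate the missing fileset volume from the local database and re-upload it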
                                    var filesetId = db.GetFilesetIdFromRemotename(n.Name);
                                    var w         = new FilesetVolumeWriter(m_options, DateTime.UtcNow);
                                    newEntry = w;
                                    w.SetRemoteFilename(n.Name);

                                    db.WriteFileset(w, null, filesetId);

                                    w.Close();
                                    if (m_options.Dryrun)
                                    {
                                        m_result.AddDryrunMessage(string.Format("would re-upload fileset {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size)));
                                    }
                                    else
                                    {
                                        db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                                        backend.Put(w);
                                    }
                                }
                                else if (n.Type == RemoteVolumeType.Index)
                                {
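                                    // Rebuild the missing index volume by re-listing the blocks of every block volume it covered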
                                    var w = new IndexVolumeWriter(m_options);
                                    newEntry = w;
                                    w.SetRemoteFilename(n.Name);

                                    foreach (var blockvolume in db.GetBlockVolumesFromIndexName(n.Name))
                                    {
                                        w.StartVolume(blockvolume.Name);
                                        var volumeid = db.GetRemoteVolumeID(blockvolume.Name);

                                        foreach (var b in db.GetBlocks(volumeid))
                                        {
                                            w.AddBlock(b.Hash, b.Size);
                                        }

                                        w.FinishVolume(blockvolume.Hash, blockvolume.Size);

                                        if (m_options.IndexfilePolicy == Options.IndexFileStrategy.Full)
                                        {
                                            foreach (var b in db.GetBlocklists(volumeid, m_options.Blocksize, hashsize))
                                            {
                                                w.WriteBlocklist(b.Item1, b.Item2, 0, b.Item3);
                                            }
                                        }
                                    }

                                    w.Close();

                                    if (m_options.Dryrun)
                                    {
                                        m_result.AddDryrunMessage(string.Format("would re-upload index file {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size)));
                                    }
                                    else
                                    {
                                        db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                                        backend.Put(w);
                                    }
                                }
                                else if (n.Type == RemoteVolumeType.Blocks)
                                {
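                                    // Rebuild the missing block volume: recover blocks from local source files first,
                                    // then fall back to downloading other remote volumes containing the same blocks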
                                    var w = new BlockVolumeWriter(m_options);
                                    newEntry = w;
                                    w.SetRemoteFilename(n.Name);

                                    using (var mbl = db.CreateBlockList(n.Name))
                                    {
                                        //First we grab all known blocks from local files
                                        foreach (var block in mbl.GetSourceFilesWithBlocks(m_options.Blocksize))
                                        {
                                            var hash = block.Hash;
                                            var size = (int)block.Size;

                                            foreach (var source in block.Sources)
                                            {
                                                var file   = source.File;
                                                var offset = source.Offset;

                                                try
                                                {
                                                    if (System.IO.File.Exists(file))
                                                    {
                                                        using (var f = System.IO.File.OpenRead(file))
                                                        {
                                                            f.Position = offset;
                                                            if (size == Library.Utility.Utility.ForceStreamRead(f, buffer, size))
                                                            {
                                                                var newhash = Convert.ToBase64String(blockhasher.ComputeHash(buffer, 0, size));
                                                                if (newhash == hash)
                                                                {
                                                                    if (mbl.SetBlockRestored(hash, size))
                                                                    {
                                                                        w.AddBlock(hash, buffer, 0, size, Duplicati.Library.Interface.CompressionHint.Default);
                                                                    }
                                                                    break;
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                                catch (Exception ex)
                                                {
                                                    m_result.AddError(string.Format("Failed to access file: {0}", file), ex);
                                                }
                                            }
                                        }

                                        //Then we grab all remote volumes that have the missing blocks
                                        foreach (var vol in new AsyncDownloader(mbl.GetMissingBlockSources().ToList(), backend))
                                        {
                                            try
                                            {
                                                using (var tmpfile = vol.TempFile)
                                                    using (var f = new BlockVolumeReader(RestoreHandler.GetCompressionModule(vol.Name), tmpfile, m_options))
                                                        foreach (var b in f.Blocks)
                                                        {
                                                            if (mbl.SetBlockRestored(b.Key, b.Value))
                                                            {
                                                                if (f.ReadBlock(b.Key, buffer) == b.Value)
                                                                {
                                                                    w.AddBlock(b.Key, buffer, 0, (int)b.Value, Duplicati.Library.Interface.CompressionHint.Default);
                                                                }
                                                            }
                                                        }
                                            }
                                            catch (Exception ex)
                                            {
                                                m_result.AddError(string.Format("Failed to access remote file: {0}", vol.Name), ex);
                                            }
                                        }

                                        // If we managed to recover all blocks, NICE!
                                        var missingBlocks = mbl.GetMissingBlocks().Count();
                                        if (missingBlocks > 0)
                                        {
                                            //TODO: How do we handle this situation?
                                            m_result.AddMessage(string.Format("Repair cannot acquire {0} required blocks for volume {1}, which are required by the following filesets: ", missingBlocks, n.Name));
                                            foreach (var f in mbl.GetFilesetsUsingMissingBlocks())
                                            {
                                                m_result.AddMessage(f.Name);
                                            }

                                            if (!m_options.Dryrun)
                                            {
                                                m_result.AddMessage("This may be fixed by deleting the filesets and running repair again");

                                                throw new Exception(string.Format("Repair not possible, missing {0} blocks!!!", missingBlocks));
                                            }
                                        }
                                        else
                                        {
                                            if (m_options.Dryrun)
                                            {
                                                m_result.AddDryrunMessage(string.Format("would re-upload block file {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size)));
                                            }
                                            else
                                            {
                                                db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                                                backend.Put(w);
                                            }
                                        }
                                    }
                                }
                            }
                            catch (Exception ex)
                            {
                                if (newEntry != null)
                                {
                                    try { newEntry.Dispose(); }
                                    catch { }
                                    finally { newEntry = null; }
                                }

                                m_result.AddError(string.Format("Failed to perform cleanup for missing file: {0}, message: {1}", n.Name, ex.Message), ex);

                                if (ex is System.Threading.ThreadAbortException)
                                {
                                    throw;
                                }
                            }
                        }
                    }
                    else
                    {
                        m_result.AddMessage("Destination and database are synchronized, not making any changes");
                    }

                    m_result.OperationProgressUpdater.UpdateProgress(1);
                    backend.WaitForComplete(db, null);
                    db.WriteResults();
                }
        }
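
The block-recovery loop above re-reads each candidate region of a local source file and re-hashes it before trusting it. Pulled out of context, the check looks roughly like this (a sketch; the helper name and the plain read loop standing in for Library.Utility.Utility.ForceStreamRead are mine):

    // Sketch: does the file region at (path, offset) still hash to expectedHash?
    // blockhasher is a System.Security.Cryptography.HashAlgorithm, created as in the example above.
    static bool BlockMatches(System.Security.Cryptography.HashAlgorithm blockhasher,
                             string path, long offset, byte[] buffer, int size, string expectedHash)
    {
        using (var f = System.IO.File.OpenRead(path))
        {
            f.Position = offset;

            // Read exactly `size` bytes, tolerating short reads from the stream
            int total = 0, read;
            while (total < size && (read = f.Read(buffer, total, size - total)) > 0)
                total += read;
            if (total != size)
                return false; // the source file shrank or changed since the backup

            var hash = Convert.ToBase64String(blockhasher.ComputeHash(buffer, 0, size));
            return hash == expectedHash;
        }
    }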
Example #60
        private void CompactIfRequired(BackendManager backend, long lastVolumeSize)
        {
            var currentIsSmall = lastVolumeSize != -1 && lastVolumeSize <= m_options.SmallFileSize;

            if (m_options.KeepTime.Ticks > 0 || m_options.KeepVersions != 0)
            {
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Delete);
                m_result.DeleteResults = new DeleteResults(m_result);
                using(var db = new LocalDeleteDatabase(m_database))
                    new DeleteHandler(backend.BackendUrl, m_options, (DeleteResults)m_result.DeleteResults).DoRun(db, ref m_transaction, true, currentIsSmall);

            }
            else if (currentIsSmall && !m_options.NoAutoCompact)
            {
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Compact);
                m_result.CompactResults = new CompactResults(m_result);
                using(var db = new LocalDeleteDatabase(m_database))
                    new CompactHandler(backend.BackendUrl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, ref m_transaction);
            }
        }
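
The branching above reduces to a small decision rule; as a standalone sketch (the helper name and string results are illustrative, not part of the API):

    // Mirror of CompactIfRequired's trigger logic: retention rules win,
    // otherwise a trailing small volume triggers an automatic compact.
    static string DecideCompactAction(long lastVolumeSize, long smallFileSize,
                                      long keepTimeTicks, int keepVersions, bool noAutoCompact)
    {
        var currentIsSmall = lastVolumeSize != -1 && lastVolumeSize <= smallFileSize;
        if (keepTimeTicks > 0 || keepVersions != 0)
            return "delete";   // DeleteHandler runs and may compact as a side effect
        if (currentIsSmall && !noAutoCompact)
            return "compact";  // CompactHandler runs directly
        return "none";
    }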