Example No. 1
        /// <summary>
        /// Gets the compression module by parsing the filename
        /// </summary>
        /// <param name="filename">The filename to parse</param>
        /// <returns>The compression module</returns>
        public static string GetCompressionModule(string filename)
        {
            var tmp = VolumeBase.ParseFilename(filename);

            if (tmp == null)
            {
                throw new Exception(string.Format("Unable to parse filename to valid entry: {0}", filename));
            }

            return tmp.CompressionModule;
        }
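
A minimal usage sketch (not part of the source): assuming a typical Duplicati volume name of the form <prefix>-<timestamp>.dlist.<compression>[.<encryption>], the helper returns the compression extension and throws on anything it cannot parse.

        // Hedged sketch; the filename below is hypothetical but follows the
        // assumed "<prefix>-<timestamp>.dlist.<compression>[.<encryption>]" layout.
        var module = RestoreHandler.GetCompressionModule("duplicati-20240101T120000Z.dlist.zip.aes");
        // module is expected to be "zip"; an unparsable name throws instead of returning null.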
Example No. 2
        private async Task RenameFileAfterErrorAsync(FileEntryItem item)
        {
            var p       = VolumeBase.ParseFilename(item.RemoteFilename);
            var guid    = VolumeWriterBase.GenerateGuid();
            var time    = p.Time.Ticks == 0 ? p.Time : p.Time.AddSeconds(1);
            var newname = VolumeBase.GenerateFilename(p.FileType, p.Prefix, guid, time, p.CompressionModule, p.EncryptionModule);
            var oldname = item.RemoteFilename;

            await m_stats.SendEventAsync(item.Operation, BackendEventType.Rename, oldname, item.Size);

            await m_stats.SendEventAsync(item.Operation, BackendEventType.Rename, newname, item.Size);

            Logging.Log.WriteInformationMessage(LOGTAG, "RenameRemoteTargetFile", "Renaming \"{0}\" to \"{1}\"", oldname, newname);
            await m_database.RenameRemoteFileAsync(oldname, newname);

            item.RemoteFilename = newname;
        }
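
A hedged sketch of a possible call site: after an upload fails in a way that leaves the remote state unknown, the item is re-registered under a fresh name (new GUID, timestamp bumped by one second) so the retry cannot collide with the half-uploaded original. UploadFileAsync is a hypothetical stand-in for the real upload call.

        // Hedged sketch, not from the Duplicati source; runs inside an async method.
        try
        {
            await UploadFileAsync(item);             // hypothetical upload call
        }
        catch (Exception)
        {
            await RenameFileAfterErrorAsync(item);   // item.RemoteFilename is updated
            await UploadFileAsync(item);             // retry under the new name
        }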
Example No. 3
        /// <summary>
        /// Look in the database for filenames similar to the current filename, but with different compression and encryption modules
        /// </summary>
        /// <returns>The volume id of the item, or -1 if no match is found</returns>
        /// <param name="filename">The filename to look up; replaced with the matching filename if a match is found</param>
        /// <param name="restoredb">The database to query</param>
        public long ProbeForMatchingFilename(ref string filename, LocalRestoreDatabase restoredb)
        {
            var p = VolumeBase.ParseFilename(filename);

            if (p != null)
            {
                foreach (var compmodule in Library.DynamicLoader.CompressionLoader.Keys)
                {
                    foreach (var encmodule in Library.DynamicLoader.EncryptionLoader.Keys.Union(new string[] { "" }))
                    {
                        var testfilename = VolumeBase.GenerateFilename(p.FileType, p.Prefix, p.Guid, p.Time, compmodule, encmodule);
                        var tvid         = restoredb.GetRemoteVolumeID(testfilename);
                        if (tvid >= 0)
                        {
                            Logging.Log.WriteWarningMessage(LOGTAG, "RewritingFilenameMapping", null, "Unable to find volume {0}, but mapping to matching file {1}", filename, testfilename);
                            filename = testfilename;
                            return tvid;
                        }
                    }
                }
            }

            return -1;
        }
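
A hedged usage sketch: because filename is passed by ref, a successful probe rewrites it to the variant that actually exists remotely, so the caller can continue with the corrected name. The index-volume name below is hypothetical.

        // Hedged sketch; restoredb is an open LocalRestoreDatabase.
        var name = "duplicati-iabcdef.dindex.zip";   // hypothetical volume name
        var volumeId = ProbeForMatchingFilename(ref name, restoredb);
        if (volumeId >= 0)
        {
            // name now holds the matching remote filename, possibly with a
            // different compression or encryption extension
        }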
Example No. 4
        /// <summary>
        /// Run the recreate procedure
        /// </summary>
        /// <param name="dbparent">The database to restore into</param>
        /// <param name="updating">True if this is an update call, false otherwise</param>
        /// <param name="filter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param>
        /// <param name="filelistfilter">Filters the files in a filelist to prevent downloading unwanted data</param>
        /// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be use to recover data blocks while processing blocklists</param>
        internal void DoRun(LocalDatabase dbparent, bool updating, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Recreate_Running);

            //We build a local database in steps.
            using (var restoredb = new LocalRecreateDatabase(dbparent, m_options))
                using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, restoredb))
                {
                    restoredb.RepairInProgress = true;

                    var volumeIds = new Dictionary <string, long>();

                    var rawlist = backend.List();

                    //First step is to examine the remote storage to see what
                    // kind of data we can find
                    var remotefiles =
                        (from x in rawlist
                         let n = VolumeBase.ParseFilename(x)
                         where n != null && n.Prefix == m_options.Prefix
                         select n).ToArray(); //ToArray() ensures that we do not remote-request it multiple times

                    if (remotefiles.Length == 0)
                    {
                        if (rawlist.Count == 0)
                        {
                            throw new UserInformationException("No files were found at the remote location, perhaps the target url is incorrect?", "EmptyRemoteLocation");
                        }
                        else
                        {
                            var tmp =
                                (from x in rawlist
                                 let n = VolumeBase.ParseFilename(x)
                                 where n != null
                                 select n.Prefix).ToArray();

                            var types = tmp.Distinct().ToArray();
                            if (tmp.Length == 0)
                            {
                                throw new UserInformationException(string.Format("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count), "EmptyRemoteLocation");
                            }
                            else if (types.Length == 1)
                            {
                                throw new UserInformationException(string.Format("Found {0} parse-able files with the prefix {1}, did you forget to set the backup prefix?", tmp.Length, types[0]), "EmptyRemoteLocationWithPrefix");
                            }
                            else
                            {
                                throw new UserInformationException(string.Format("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup prefix?", tmp.Length, rawlist.Count, string.Join(", ", types)), "EmptyRemoteLocationWithPrefix");
                            }
                        }
                    }

                    //Then we select the filelist we should work with,
                    // and create the filelist table to fit
                    IEnumerable <IParsedVolume> filelists =
                        from n in remotefiles
                        where n.FileType == RemoteVolumeType.Files
                        orderby n.Time descending
                        select n;

                    if (filelists.Count() <= 0)
                    {
                        throw new UserInformationException(string.Format("No filelists found on the remote destination"), "EmptyRemoteLocation");
                    }

                    if (filelistfilter != null)
                    {
                        filelists = filelistfilter(filelists).Select(x => x.Value).ToArray();
                    }

                    if (filelists.Count() <= 0)
                    {
                        throw new UserInformationException(string.Format("No filelists"), "NoMatchingRemoteFilelists");
                    }

                    // If we are updating, all files should be accounted for
                    foreach (var fl in remotefiles)
                    {
                        volumeIds[fl.File.Name] = updating ? restoredb.GetRemoteVolumeID(fl.File.Name) : restoredb.RegisterRemoteVolume(fl.File.Name, fl.FileType, fl.File.Size, RemoteVolumeState.Uploaded);
                    }

                    var hasUpdatedOptions = false;

                    if (updating)
                    {
                        Utility.UpdateOptionsFromDb(restoredb, m_options);
                        Utility.VerifyParameters(restoredb, m_options);
                    }

                    //Record all blocksets and files needed
                    using (var tr = restoredb.BeginTransaction())
                    {
                        var filelistWork = (from n in filelists orderby n.Time select new RemoteVolume(n.File) as IRemoteVolume).ToList();
                        Logging.Log.WriteInformationMessage(LOGTAG, "RebuildStarted", "Rebuild database started, downloading {0} filelists", filelistWork.Count);

                        var progress = 0;

                        // Register the files we are working with, if not already updated
                        if (updating)
                        {
                            foreach (var n in filelists)
                            {
                                if (volumeIds[n.File.Name] == -1)
                                {
                                    volumeIds[n.File.Name] = restoredb.RegisterRemoteVolume(n.File.Name, n.FileType, RemoteVolumeState.Uploaded, n.File.Size, new TimeSpan(0), tr);
                                }
                            }
                        }

                        var isFirstFilelist = true;
                        var blocksize       = m_options.Blocksize;
                        var hashes_pr_block = blocksize / m_options.BlockhashSize;
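                        // A blocklist is itself stored as a block, so one block holds
                        // (blocksize / block-hash size) hashes; this ratio is used below to
                        // compute the expected number of blocklist hashes per file.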

                        foreach (var entry in new AsyncDownloader(filelistWork, backend))
                        {
                            try
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(restoredb, null);
                                    m_result.EndTime = DateTime.UtcNow;
                                    return;
                                }

                                progress++;
                                if (filelistWork.Count == 1 && m_options.RepairOnlyPaths)
                                {
                                    m_result.OperationProgressUpdater.UpdateProgress(0.5f);
                                }
                                else
                                {
                                    m_result.OperationProgressUpdater.UpdateProgress(((float)progress / filelistWork.Count()) * (m_options.RepairOnlyPaths ? 1f : 0.2f));
                                }

                                using (var tmpfile = entry.TempFile)
                                {
                                    isFirstFilelist = false;

                                    if (entry.Hash != null && entry.Size > 0)
                                    {
                                        restoredb.UpdateRemoteVolume(entry.Name, RemoteVolumeState.Verified, entry.Size, entry.Hash, tr);
                                    }

                                    var parsed = VolumeBase.ParseFilename(entry.Name);

                                    if (!hasUpdatedOptions && !updating)
                                    {
                                        VolumeReaderBase.UpdateOptionsFromManifest(parsed.CompressionModule, tmpfile, m_options);
                                        hasUpdatedOptions = true;
                                        // Recompute the cached sizes
                                        blocksize       = m_options.Blocksize;
                                        hashes_pr_block = blocksize / m_options.BlockhashSize;
                                    }


                                    // Create timestamped operations based on the file timestamp
                                    var filesetid = restoredb.CreateFileset(volumeIds[entry.Name], parsed.Time, tr);
                                    using (var filelistreader = new FilesetVolumeReader(parsed.CompressionModule, tmpfile, m_options))
                                        foreach (var fe in filelistreader.Files.Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path)))
                                        {
                                            try
                                            {
                                                var expectedmetablocks          = (fe.Metasize + blocksize - 1) / blocksize;
                                                var expectedmetablocklisthashes = (expectedmetablocks + hashes_pr_block - 1) / hashes_pr_block;
                                                if (expectedmetablocks <= 1)
                                                {
                                                    expectedmetablocklisthashes = 0;
                                                }

                                                var metadataid = long.MinValue;
                                                switch (fe.Type)
                                                {
                                                case FilelistEntryType.Folder:
                                                    metadataid = restoredb.AddMetadataset(fe.Metahash, fe.Metasize, fe.MetaBlocklistHashes, expectedmetablocklisthashes, tr);
                                                    restoredb.AddDirectoryEntry(filesetid, fe.Path, fe.Time, metadataid, tr);
                                                    break;

                                                case FilelistEntryType.File:
                                                    var expectedblocks          = (fe.Size + blocksize - 1) / blocksize;
                                                    var expectedblocklisthashes = (expectedblocks + hashes_pr_block - 1) / hashes_pr_block;
                                                    if (expectedblocks <= 1)
                                                    {
                                                        expectedblocklisthashes = 0;
                                                    }

                                                    var blocksetid = restoredb.AddBlockset(fe.Hash, fe.Size, fe.BlocklistHashes, expectedblocklisthashes, tr);
                                                    metadataid = restoredb.AddMetadataset(fe.Metahash, fe.Metasize, fe.MetaBlocklistHashes, expectedmetablocklisthashes, tr);
                                                    restoredb.AddFileEntry(filesetid, fe.Path, fe.Time, blocksetid, metadataid, tr);

                                                    if (fe.Size <= blocksize)
                                                    {
                                                        if (!string.IsNullOrWhiteSpace(fe.Blockhash))
                                                        {
                                                            restoredb.AddSmallBlocksetLink(fe.Hash, fe.Blockhash, fe.Blocksize, tr);
                                                        }
                                                        else if (m_options.BlockHashAlgorithm == m_options.FileHashAlgorithm)
                                                        {
                                                            restoredb.AddSmallBlocksetLink(fe.Hash, fe.Hash, fe.Size, tr);
                                                        }
                                                        else
                                                        {
                                                            Logging.Log.WriteWarningMessage(LOGTAG, "MissingBlockHash", null, "No block hash found for file: {0}", fe.Path);
                                                        }
                                                    }

                                                    break;

                                                case FilelistEntryType.Symlink:
                                                    metadataid = restoredb.AddMetadataset(fe.Metahash, fe.Metasize, fe.MetaBlocklistHashes, expectedmetablocklisthashes, tr);
                                                    restoredb.AddSymlinkEntry(filesetid, fe.Path, fe.Time, metadataid, tr);
                                                    break;

                                                default:
                                                    Logging.Log.WriteWarningMessage(LOGTAG, "SkippingUnknownFileEntry", null, "Skipping file-entry with unknown type {0}: {1} ", fe.Type, fe.Path);
                                                    break;
                                                }

                                                if (fe.Metasize <= blocksize && (fe.Type == FilelistEntryType.Folder || fe.Type == FilelistEntryType.File || fe.Type == FilelistEntryType.Symlink))
                                                {
                                                    if (!string.IsNullOrWhiteSpace(fe.Metablockhash))
                                                    {
                                                        restoredb.AddSmallBlocksetLink(fe.Metahash, fe.Metablockhash, fe.Metasize, tr);
                                                    }
                                                    else if (m_options.BlockHashAlgorithm == m_options.FileHashAlgorithm)
                                                    {
                                                        restoredb.AddSmallBlocksetLink(fe.Metahash, fe.Metahash, fe.Metasize, tr);
                                                    }
                                                    else
                                                    {
                                                        Logging.Log.WriteWarningMessage(LOGTAG, "MissingMetadataBlockHash", null, "No block hash found for file metadata: {0}", fe.Path);
                                                    }
                                                }
                                            }
                                            catch (Exception ex)
                                            {
                                                Logging.Log.WriteWarningMessage(LOGTAG, "FileEntryProcessingFailed", ex, "Failed to process file-entry: {0}", fe.Path);
                                            }
                                        }
                                }
                            }
                            catch (Exception ex)
                            {
                                Logging.Log.WriteWarningMessage(LOGTAG, "FileProcessingFailed", ex, "Failed to process file: {0}", entry.Name);
                                if (ex is System.Threading.ThreadAbortException)
                                {
                                    m_result.EndTime = DateTime.UtcNow;
                                    throw;
                                }

                                if (isFirstFilelist && ex is System.Security.Cryptography.CryptographicException)
                                {
                                    m_result.EndTime = DateTime.UtcNow;
                                    throw;
                                }
                            }
                        }

                        //Make sure we write the config
                        if (!updating)
                        {
                            Utility.VerifyParameters(restoredb, m_options, tr);
                        }

                        using (new Logging.Timer(LOGTAG, "CommitUpdateFilesetFromRemote", "CommitUpdateFilesetFromRemote"))
                            tr.Commit();
                    }

                    if (!m_options.RepairOnlyPaths)
                    {
                        var hashalg = Library.Utility.HashAlgorithmHelper.Create(m_options.BlockHashAlgorithm);
                        if (hashalg == null)
                        {
                            throw new UserInformationException(Strings.Common.InvalidHashAlgorithm(m_options.BlockHashAlgorithm), "BlockHashAlgorithmNotSupported");
                        }
                        var hashsize = hashalg.HashSize / 8;

                        //Grab all index files, and update the block table
                        using (var tr = restoredb.BeginTransaction())
                        {
                            var indexfiles = (
                                from n in remotefiles
                                where n.FileType == RemoteVolumeType.Index
                                select new RemoteVolume(n.File) as IRemoteVolume).ToList();

                            Logging.Log.WriteInformationMessage(LOGTAG, "FilelistsRestored", "Filelists restored, downloading {0} index files", indexfiles.Count);

                            var progress = 0;

                            foreach (var sf in new AsyncDownloader(indexfiles, backend))
                            {
                                try
                                {
                                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                    {
                                        backend.WaitForComplete(restoredb, null);
                                        m_result.EndTime = DateTime.UtcNow;
                                        return;
                                    }

                                    progress++;
                                    m_result.OperationProgressUpdater.UpdateProgress((((float)progress / indexfiles.Count) * 0.5f) + 0.2f);

                                    using (var tmpfile = sf.TempFile)
                                    {
                                        if (sf.Hash != null && sf.Size > 0)
                                        {
                                            restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Verified, sf.Size, sf.Hash, tr);
                                        }

                                        using (var svr = new IndexVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options, hashsize))
                                        {
                                            foreach (var a in svr.Volumes)
                                            {
                                                var filename = a.Filename;
                                                var volumeID = restoredb.GetRemoteVolumeID(filename);

                                                // No such file
                                                if (volumeID < 0)
                                                {
                                                    volumeID = ProbeForMatchingFilename(ref filename, restoredb);
                                                }

                                                // Still broken, register a missing item
                                                if (volumeID < 0)
                                                {
                                                    var p = VolumeBase.ParseFilename(filename);
                                                    if (p == null)
                                                    {
                                                        throw new Exception(string.Format("Unable to parse filename: {0}", filename));
                                                    }
                                                    Logging.Log.WriteErrorMessage(LOGTAG, "MissingFileDetected", null, "Remote file referenced as {0}, but not found in list, registering a missing remote file", filename);
                                                    volumeID = restoredb.RegisterRemoteVolume(filename, p.FileType, RemoteVolumeState.Verified, tr);
                                                }

                                                //Add all block/volume mappings
                                                foreach (var b in a.Blocks)
                                                {
                                                    restoredb.UpdateBlock(b.Key, b.Value, volumeID, tr);
                                                }

                                                restoredb.UpdateRemoteVolume(filename, RemoteVolumeState.Verified, a.Length, a.Hash, tr);
                                                restoredb.AddIndexBlockLink(restoredb.GetRemoteVolumeID(sf.Name), volumeID, tr);
                                            }

                                            //If there are blocklists in the index file, update the blocklists
                                            foreach (var b in svr.BlockLists)
                                            {
                                                restoredb.UpdateBlockset(b.Hash, b.Blocklist, tr);
                                            }
                                        }
                                    }
                                }
                                catch (Exception ex)
                                {
                                    //Not fatal
                                    Logging.Log.WriteErrorMessage(LOGTAG, "IndexFileProcessingFailed", ex, "Failed to process index file: {0}", sf.Name);
                                    if (ex is System.Threading.ThreadAbortException)
                                    {
                                        m_result.EndTime = DateTime.UtcNow;
                                        throw;
                                    }
                                }
                            }

                            using (new Logging.Timer(LOGTAG, "CommitRecreateDb", "CommitRecreatedDb"))
                                tr.Commit();

                            // TODO: In some cases, we can avoid downloading all index files,
                            // if we are lucky and pick the right ones
                        }

                        // We have now grabbed as much information as possible,
                        // if we are still missing data, we must now fetch block files
                        restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, null);

                        //We do this in three passes
                        for (var i = 0; i < 3; i++)
                        {
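                            // Pass 0 fetches volumes known to contain required blocklists,
                            // pass 1 probes likely candidates, and the final pass falls back
                            // to scanning every remaining volume (see the log messages below).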
                            // Grab the list matching the pass type
                            var lst = restoredb.GetMissingBlockListVolumes(i, m_options.Blocksize, hashsize).ToList();
                            if (lst.Count > 0)
                            {
                                var fullist = ": " + string.Join(", ", lst.Select(x => x.Name));
                                switch (i)
                                {
                                case 0:
                                    Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingRequiredBlocklistVolumes", "Processing required {0} blocklist volumes{1}", lst.Count, fullist);
                                    Logging.Log.WriteInformationMessage(LOGTAG, "ProcessingRequiredBlocklistVolumes", "Processing required {0} blocklist volumes{1}", lst.Count, m_options.FullResult ? fullist : string.Empty);
                                    break;

                                case 1:
                                    Logging.Log.WriteVerboseMessage(LOGTAG, "ProbingCandicateBlocklistVolumes", "Probing {0} candidate blocklist volumes{1}", lst.Count, fullist);
                                    Logging.Log.WriteInformationMessage(LOGTAG, "ProbingCandicateBlocklistVolumes", "Probing {0} candidate blocklist volumes{1}", lst.Count, m_options.FullResult ? fullist : string.Empty);
                                    break;

                                default:
                                    Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingAllBlocklistVolumes", "Processing all of the {0} volumes for blocklists{1}", lst.Count, fullist);
                                    Logging.Log.WriteVerboseMessage(LOGTAG, "ProcessingAllBlocklistVolumes", "Processing all of the {0} volumes for blocklists{1}", lst.Count, m_options.FullResult ? fullist : string.Empty);
                                    break;
                                }
                            }

                            var progress = 0;
                            foreach (var sf in new AsyncDownloader(lst, backend))
                            {
                                using (var tmpfile = sf.TempFile)
                                    using (var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options))
                                        using (var tr = restoredb.BeginTransaction())
                                        {
                                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                            {
                                                backend.WaitForComplete(restoredb, null);
                                                m_result.EndTime = DateTime.UtcNow;
                                                return;
                                            }

                                            progress++;
                                            m_result.OperationProgressUpdater.UpdateProgress((((float)progress / lst.Count) * 0.1f) + 0.7f + (i * 0.1f));

                                            var volumeid = restoredb.GetRemoteVolumeID(sf.Name);

                                            restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Uploaded, sf.Size, sf.Hash, tr);

                                            // Update the block table so we know about the block/volume map
                                            foreach (var h in rd.Blocks)
                                            {
                                                restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr);
                                            }

                                            // Grab all known blocklists from the volume
                                            foreach (var blocklisthash in restoredb.GetBlockLists(volumeid))
                                            {
                                                restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr);
                                            }

                                            // Update tables so we know if we are done
                                            restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, tr);

                                            using (new Logging.Timer(LOGTAG, "CommitRestoredBlocklist", "CommitRestoredBlocklist"))
                                                tr.Commit();

                                            //At this point we can patch files with data from the block volume
                                            if (blockprocessor != null)
                                            {
                                                blockprocessor(sf.Name, rd);
                                            }
                                        }
                            }
                        }
                    }

                    backend.WaitForComplete(restoredb, null);

                    if (m_options.RepairOnlyPaths)
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "RecreateOrUpdateOnly", "Recreate/path-update completed, not running consistency checks");
                    }
                    else
                    {
                        Logging.Log.WriteInformationMessage(LOGTAG, "RecreateCompletedCheckingDatabase", "Recreate completed, verifying the database consistency");

                        //All done, we must verify that all blocklists are fully intact;
                        // if this fails, the db will not be deleted, so it can still be used,
                        // except to continue a backup
                        m_result.EndTime = DateTime.UtcNow;

                        using (var lbfdb = new LocalListBrokenFilesDatabase(restoredb))
                        {
                            var broken = lbfdb.GetBrokenFilesets(new DateTime(0), null, null).Count();
                            if (broken != 0)
                            {
                                throw new UserInformationException(string.Format("Recreated database has missing blocks and {0} broken filelists. Consider using \"{1}\" and \"{2}\" to purge broken data from the remote store and the database.", broken, "list-broken-files", "purge-broken-files"), "DatabaseIsBrokenConsiderPurge");
                            }
                        }

                        restoredb.VerifyConsistency(m_options.Blocksize, m_options.BlockhashSize, true, null);

                        Logging.Log.WriteInformationMessage(LOGTAG, "RecreateCompleted", "Recreate completed, and consistency checks completed, marking database as complete");

                        restoredb.RepairInProgress = false;
                    }

                    m_result.EndTime = DateTime.UtcNow;
                }
        }
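
The filelistfilter hook makes it possible to rebuild the database from a single backup version. Below is a hedged sketch of such a delegate; its signature is inferred from the call site above (filelistfilter(filelists).Select(x => x.Value)), not confirmed from the library.

        // Hedged sketch: keep only the newest filelist. Assumes using System.Linq
        // and System.Collections.Generic; the numbering is arbitrary here, as only
        // the .Value side is consumed by DoRun.
        NumberedFilterFilelistDelegate newestOnly = lists =>
            lists.OrderByDescending(x => x.Time)
                 .Take(1)
                 .Select((x, i) => new KeyValuePair<long, IParsedVolume>(i, x));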
Example No. 5
        private async Task RunAsync(string[] sources, Library.Utility.IFilter filter)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Begin);

            // New isolated scope for each operation
            using (new IsolatedChannelScope())
                using (m_database = new LocalBackupDatabase(m_options.Dbpath, m_options))
                {
                    m_result.SetDatabase(m_database);
                    m_result.Dryrun = m_options.Dryrun;

                    // Check the database integrity
                    Utility.UpdateOptionsFromDb(m_database, m_options);
                    Utility.VerifyParameters(m_database, m_options);

                    var probe_path = m_database.GetFirstPath();
                    if (probe_path != null && Duplicati.Library.Utility.Utility.GuessDirSeparator(probe_path) != System.IO.Path.DirectorySeparatorChar.ToString())
                    {
                        throw new UserInformationException(string.Format("The backup contains files that belong to another operating system. Proceeding with a backup would cause the database to contain paths from two different operation systems, which is not supported. To proceed without losing remote data, delete all filesets and make sure the --{0} option is set, then run the backup again to re-use the existing data on the remote store.", "no-auto-compact"), "CrossOsDatabaseReuseNotSupported");
                    }

                    if (m_database.PartiallyRecreated)
                    {
                        throw new UserInformationException("The database was only partially recreated. This database may be incomplete and the repair process is not allowed to alter remote files as that could result in data loss.", "DatabaseIsPartiallyRecreated");
                    }

                    if (m_database.RepairInProgress)
                    {
                        throw new UserInformationException("The database was attempted repaired, but the repair did not complete. This database may be incomplete and the backup process cannot continue. You may delete the local database and attempt to repair it again.", "DatabaseRepairInProgress");
                    }

                    // If there is no filter, we set an empty filter to simplify the code
                    // If there is a filter, we make sure that the sources are included
                    m_filter       = filter ?? new Library.Utility.FilterExpression();
                    m_sourceFilter = new Library.Utility.FilterExpression(sources, true);

                    Task parallelScanner = null;
                    Task uploader        = null;
                    try
                    {
                        // Setup runners and instances here
                        using (var db = new Backup.BackupDatabase(m_database, m_options))
                            using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
                                using (var filesetvolume = new FilesetVolumeWriter(m_options, m_database.OperationTimestamp))
                                    using (var stats = new Backup.BackupStatsCollector(m_result))
                                        using (var bk = new Common.BackendHandler(m_options, m_backendurl, db, stats, m_result.TaskReader))
                                            // Keep a reference to these channels to avoid shutdown
                                            using (var uploadtarget = ChannelManager.GetChannel(Backup.Channels.BackendRequest.ForWrite))
                                            {
                                                long filesetid;
                                                var  counterToken = new CancellationTokenSource();
                                                using (var snapshot = GetSnapshot(sources, m_options))
                                                {
                                                    try
                                                    {
                                                        // Start parallel scan, or use the database
                                                        if (m_options.DisableFileScanner)
                                                        {
                                                            var d = m_database.GetLastBackupFileCountAndSize();
                                                            m_result.OperationProgressUpdater.UpdatefileCount(d.Item1, d.Item2, true);
                                                        }
                                                        else
                                                        {
                                                            parallelScanner = Backup.CountFilesHandler.Run(sources, snapshot, m_result, m_options, m_sourceFilter, m_filter, m_result.TaskReader, counterToken.Token);
                                                        }

                                                        // Make sure the database is sane
                                                        await db.VerifyConsistencyAsync(m_options.Blocksize, m_options.BlockhashSize, true);

                                                        // Start the uploader process
                                                        uploader = Backup.BackendUploader.Run(bk, m_options, db, m_result, m_result.TaskReader, stats);

                                                        // If we have an interrupted backup, grab the last incomplete fileset
                                                        string lasttempfilelist = null;
                                                        long   lasttempfileid   = -1;
                                                        if (!m_options.DisableSyntheticFilelist)
                                                        {
                                                            var candidates = (await db.GetIncompleteFilesetsAsync()).OrderBy(x => x.Value).ToArray();
                                                            if (candidates.Length > 0)
                                                            {
                                                                lasttempfileid   = candidates.Last().Key;
                                                                lasttempfilelist = m_database.GetRemoteVolumeFromID(lasttempfileid).Name;
                                                            }
                                                        }

                                                        // TODO: Rewrite to using the uploader process, or the BackendHandler interface
                                                        // Do a remote verification, unless disabled
                                                        PreBackupVerify(backend, lasttempfilelist);

                                                        // If the previous backup was interrupted, send a synthetic list
                                                        await Backup.UploadSyntheticFilelist.Run(db, m_options, m_result, m_result.TaskReader, lasttempfilelist, lasttempfileid);

                                                        // Grab the previous backup ID, if any
                                                        var prevfileset = m_database.FilesetTimes.FirstOrDefault();
                                                        if (prevfileset.Value.ToUniversalTime() > m_database.OperationTimestamp.ToUniversalTime())
                                                        {
                                                            throw new Exception(string.Format("The previous backup has time {0}, but this backup has time {1}. Something is wrong with the clock.", prevfileset.Value.ToLocalTime(), m_database.OperationTimestamp.ToLocalTime()));
                                                        }

                                                        var lastfilesetid = prevfileset.Value.Ticks == 0 ? -1 : prevfileset.Key;

                                                        // Rebuild any index files that are missing
                                                        await Backup.RecreateMissingIndexFiles.Run(db, m_options, m_result, m_result.TaskReader);

                                                        // This should be removed as the lookups are no longer used
                                                        m_database.BuildLookupTable(m_options);

                                                        // Prepare the operation by registering the filelist
                                                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_ProcessingFiles);

                                                        var repcnt = 0;
                                                        while (repcnt < 100 && await db.GetRemoteVolumeIDAsync(filesetvolume.RemoteFilename) >= 0)
                                                        {
                                                            filesetvolume.ResetRemoteFilename(m_options, m_database.OperationTimestamp.AddSeconds(repcnt++));
                                                        }

                                                        if (await db.GetRemoteVolumeIDAsync(filesetvolume.RemoteFilename) >= 0)
                                                        {
                                                            throw new Exception("Unable to generate a unique fileset name");
                                                        }

                                                        var filesetvolumeid = await db.RegisterRemoteVolumeAsync(filesetvolume.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary);

                                                        filesetid = await db.CreateFilesetAsync(filesetvolumeid, VolumeBase.ParseFilename(filesetvolume.RemoteFilename).Time);

                                                        // create USN-based scanner if enabled
                                                        var journalService = GetJournalService(sources, snapshot, filter, lastfilesetid);

                                                        // Run the backup operation
                                                        if (await m_result.TaskReader.ProgressAsync)
                                                        {
                                                            await RunMainOperation(sources, snapshot, journalService, db, stats, m_options, m_sourceFilter, m_filter, m_result, m_result.TaskReader, lastfilesetid).ConfigureAwait(false);
                                                        }
                                                    }
                                                    finally
                                                    {
                                                        //If the scanner is still running for some reason, make sure we kill it now
                                                        counterToken.Cancel();
                                                    }
                                                }

                                                // Ensure the database is in a sane state after adding data
                                                using (new Logging.Timer(LOGTAG, "VerifyConsistency", "VerifyConsistency"))
                                                    await db.VerifyConsistencyAsync(m_options.Blocksize, m_options.BlockhashSize, false);

                                                // Send the actual filelist
                                                if (await m_result.TaskReader.ProgressAsync)
                                                {
                                                    await Backup.UploadRealFilelist.Run(m_result, db, m_options, filesetvolume, filesetid, m_result.TaskReader);
                                                }

                                                // Wait for upload completion
                                                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload);
                                                var lastVolumeSize = await FlushBackend(m_result, uploadtarget, uploader).ConfigureAwait(false);

                                                // Make sure we have the database up-to-date
                                                await db.CommitTransactionAsync("CommitAfterUpload", false);

                                                // TODO: Remove this later
                                                m_transaction = m_database.BeginTransaction();

                                                if (await m_result.TaskReader.ProgressAsync)
                                                {
                                                    CompactIfRequired(backend, lastVolumeSize);
                                                }

                                                if (m_options.UploadVerificationFile && await m_result.TaskReader.ProgressAsync)
                                                {
                                                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_VerificationUpload);
                                                    FilelistProcessor.UploadVerificationFile(backend.BackendUrl, m_options, m_result.BackendWriter, m_database, m_transaction);
                                                }

                                                if (m_options.Dryrun)
                                                {
                                                    m_transaction.Rollback();
                                                    m_transaction = null;
                                                }
                                                else
                                                {
                                                    using (new Logging.Timer(LOGTAG, "CommitFinalizingBackup", "CommitFinalizingBackup"))
                                                        m_transaction.Commit();

                                                    m_transaction = null;

                                                    if (m_result.TaskControlRendevouz() != TaskControlState.Stop)
                                                    {
                                                        if (m_options.NoBackendverification)
                                                        {
                                                            UpdateStorageStatsFromDatabase();
                                                        }
                                                        else
                                                        {
                                                            PostBackupVerification();
                                                        }
                                                    }
                                                }

                                                m_database.WriteResults();
                                                m_database.PurgeLogData(m_options.LogRetention);
                                                if (m_options.AutoVacuum)
                                                {
                                                    m_database.Vacuum();
                                                }
                                                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Complete);
                                                return;
                                            }
                    }
                    catch (Exception ex)
                    {
                        var aex = BuildException(ex, uploader, parallelScanner);
                        Logging.Log.WriteErrorMessage(LOGTAG, "FatalError", ex, "Fatal error");
                        if (aex == ex)
                        {
                            throw;
                        }

                        throw aex;
                    }
                    finally
                    {
                        if (parallelScanner != null && !parallelScanner.IsCompleted)
                        {
                            parallelScanner.Wait(500);
                        }

                        // TODO: We want to commit? always?
                        if (m_transaction != null)
                        {
                            try { m_transaction.Rollback(); }
                            catch (Exception ex) { Logging.Log.WriteErrorMessage(LOGTAG, "RollbackError", ex, "Rollback error: {0}", ex.Message); }
                        }
                    }
                }
        }
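
One detail worth noting: the loop that calls filesetvolume.ResetRemoteFilename bumps the operation timestamp one second at a time (up to 100 attempts) until the generated remote filename is unused, mirroring the one-second bump in the rename helper of Example No. 2; both rely on the timestamp being encoded in the generated name, so no two filesets can share a name.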
Example No. 6
        /// <summary>
        /// Run the recreate procedure
        /// </summary>
        /// <param name="path">Path to the database that will be created</param>
        /// <param name="filelistfilter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param>
        /// <param name="filenamefilter">Filters the files in a filelist to prevent downloading unwanted data</param>
        /// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be use to recover data blocks while processing blocklists</param>
        internal void DoRun(LocalDatabase dbparent, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null)
        {
            var hashalg = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);

            if (hashalg == null)
            {
                throw new Exception(Strings.Foresthash.InvalidHashAlgorithm(m_options.BlockHashAlgorithm));
            }
            var hashsize = hashalg.HashSize / 8;

            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Recreate_Running);

            //We build a local database in steps.
            using (var restoredb = new LocalRecreateDatabase(dbparent, m_options))
                using (var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, restoredb))
                {
                    var volumeIds = new Dictionary <string, long>();

                    var rawlist = backend.List();

                    //First step is to examine the remote storage to see what
                    // kind of data we can find
                    var remotefiles =
                        (from x in rawlist
                         let n = VolumeBase.ParseFilename(x)
                         where n != null && n.Prefix == m_options.Prefix
                         select n).ToArray(); //ToArray() ensures that we do not remote-request it multiple times

                    if (remotefiles.Length == 0)
                    {
                        if (rawlist.Count == 0)
                        {
                            throw new Exception("No files were found at the remote location, perhaps the target url is incorrect?");
                        }
                        else
                        {
                            var tmp =
                                (from x in rawlist
                                 let n = VolumeBase.ParseFilename(x)
                                 where n != null
                                 select n.Prefix).ToArray();

                            var types = tmp.Distinct().ToArray();
                            if (tmp.Length == 0)
                            {
                                throw new Exception(string.Format("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count));
                            }
                            else if (types.Length == 1)
                            {
                                throw new Exception(string.Format("Found {0} parse-able files with the prefix {1}, did you forget to set the backup-prefix?", tmp.Length, types[0]));
                            }
                            else
                            {
                                throw new Exception(string.Format("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup-prefix?", tmp.Length, rawlist.Count, string.Join(", ", types)));
                            }
                        }
                    }

                    //Then we select the filelist we should work with,
                    // and create the filelist table to fit
                    IEnumerable <IParsedVolume> filelists =
                        from n in remotefiles
                        where n.FileType == RemoteVolumeType.Files
                        orderby n.Time descending
                        select n;

                    if (filelistfilter != null)
                    {
                        filelists = filelistfilter(filelists).Select(x => x.Value).ToArray();
                    }

                    foreach (var fl in remotefiles)
                    {
                        volumeIds[fl.File.Name] = restoredb.RegisterRemoteVolume(fl.File.Name, fl.FileType, RemoteVolumeState.Uploaded);
                    }


                    //Record all blocksets and files needed
                    using (var tr = restoredb.BeginTransaction())
                    {
                        var filelistWork = (from n in filelists orderby n.Time select new RemoteVolume(n.File) as IRemoteVolume).ToList();
                        var progress     = 0;

                        foreach (var entry in new AsyncDownloader(filelistWork, backend))
                        {
                            try
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(restoredb, null);
                                    return;
                                }

                                progress++;
                                m_result.OperationProgressUpdater.UpdateProgress(((float)progress / filelistWork.Count()) * 0.2f);

                                using (var tmpfile = entry.TempFile)
                                {
                                    if (entry.Hash != null && entry.Size > 0)
                                    {
                                        restoredb.UpdateRemoteVolume(entry.Name, RemoteVolumeState.Verified, entry.Size, entry.Hash, tr);
                                    }

                                    var parsed = VolumeBase.ParseFilename(entry.Name);
                                    // Create timestamped operations based on the file timestamp
                                    var filesetid = restoredb.CreateFileset(volumeIds[entry.Name], parsed.Time, tr);
                                    using (var filelistreader = new FilesetVolumeReader(parsed.CompressionModule, tmpfile, m_options))
                                        foreach (var fe in filelistreader.Files.Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path)))
                                        {
                                            try
                                            {
                                                if (fe.Type == FilelistEntryType.Folder)
                                                {
                                                    restoredb.AddDirectoryEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                                }
                                                else if (fe.Type == FilelistEntryType.File)
                                                {
                                                    var blocksetid = restoredb.AddBlockset(fe.Hash, fe.Size, fe.BlocklistHashes, tr);
                                                    restoredb.AddFileEntry(filesetid, fe.Path, fe.Time, blocksetid, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                                }
                                                else if (fe.Type == FilelistEntryType.Symlink)
                                                {
                                                    restoredb.AddSymlinkEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                                }
                                                else
                                                {
                                                    m_result.AddWarning(string.Format("Skipping file-entry with unknown type {0}: {1} ", fe.Type, fe.Path), null);
                                                }
                                            }
                                            catch (Exception ex)
                                            {
                                                m_result.AddWarning(string.Format("Failed to process file-entry: {0}", fe.Path), ex);
                                            }
                                        }
                                }
                            }
                            catch (Exception ex)
                            {
                                m_result.AddWarning(string.Format("Failed to process file: {0}", entry.Name), ex);
                                if (ex is System.Threading.ThreadAbortException)
                                {
                                    throw;
                                }
                            }
                        }

                        using (new Logging.Timer("CommitUpdateFilesetFromRemote"))
                            tr.Commit();
                    }

                    //Grab all index files, and update the block table
                    using (var tr = restoredb.BeginTransaction())
                    {
                        var indexfiles = (
                            from n in remotefiles
                            where n.FileType == RemoteVolumeType.Index
                            select new RemoteVolume(n.File) as IRemoteVolume).ToList();

                        var progress = 0;

                        foreach (var sf in new AsyncDownloader(indexfiles, backend))
                        {
                            try
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(restoredb, null);
                                    return;
                                }

                                progress++;
                                m_result.OperationProgressUpdater.UpdateProgress((((float)progress / indexfiles.Count) * 0.5f) + 0.2f);

                                using (var tmpfile = sf.TempFile)
                                {
                                    if (sf.Hash != null && sf.Size > 0)
                                    {
                                        restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Verified, sf.Size, sf.Hash, tr);
                                    }

                                    using (var svr = new IndexVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options, hashsize))
                                    {
                                        Utility.VerifyParameters(restoredb, m_options);

                                        foreach (var a in svr.Volumes)
                                        {
                                            var volumeID = restoredb.GetRemoteVolumeID(a.Filename);
                                            //Add all block/volume mappings
                                            foreach (var b in a.Blocks)
                                            {
                                                restoredb.UpdateBlock(b.Key, b.Value, volumeID, tr);
                                            }

                                            restoredb.UpdateRemoteVolume(a.Filename, RemoteVolumeState.Verified, a.Length, a.Hash, tr);
                                            restoredb.AddIndexBlockLink(restoredb.GetRemoteVolumeID(sf.Name), volumeID, tr);
                                        }

                                        //If there are blocklists in the index file, update the blocklists
                                        foreach (var b in svr.BlockLists)
                                        {
                                            restoredb.UpdateBlockset(b.Hash, b.Blocklist, tr);
                                        }
                                    }
                                }
                            }
                            catch (Exception ex)
                            {
                                //Not fatal
                                m_result.AddWarning(string.Format("Failed to process index file: {0}", sf.Name), ex);
                                if (ex is System.Threading.ThreadAbortException)
                                {
                                    throw;
                                }
                            }
                        }

                        using (new Logging.Timer("CommitRecreatedDb"))
                            tr.Commit();

                        // TODO: In some cases, we can avoid downloading all index files,
                        // if we are lucky and pick the right ones
                    }

                    // We have now grabbed as much information as possible;
                    // if we are still missing data, we must fetch block files
                    restoredb.FindMissingBlocklistHashes(hashsize, null);

                    //We do this in three passes
                    for (var i = 0; i < 3; i++)
                    {
                        // Grab the list matching the pass type
                        var lst = restoredb.GetMissingBlockListVolumes(i).ToList();
                        if (lst.Count > 0)
                        {
                            switch (i)
                            {
                            case 0:
                                if (m_options.Verbose)
                                {
                                    m_result.AddVerboseMessage("Processing required {0} blocklist volumes: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                }
                                else
                                {
                                    m_result.AddMessage(string.Format("Processing required {0} blocklist volumes", lst.Count));
                                }
                                break;

                            case 1:
                                if (m_options.Verbose)
                                {
                                    m_result.AddVerboseMessage("Probing {0} candidate blocklist volumes: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                }
                                else
                                {
                                    m_result.AddMessage(string.Format("Probing {0} candidate blocklist volumes", lst.Count));
                                }
                                break;

                            default:
                                if (m_options.Verbose)
                                {
                                    m_result.AddVerboseMessage("Processing all of the {0} volumes for blocklists: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                }
                                else
                                {
                                    m_result.AddMessage(string.Format("Processing all of the {0} volumes for blocklists", lst.Count));
                                }
                                break;
                            }
                        }

                        var progress = 0;
                        foreach (var sf in new AsyncDownloader(lst, backend))
                        {
                            using (var tmpfile = sf.TempFile)
                                using (var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options))
                                    using (var tr = restoredb.BeginTransaction())
                                    {
                                        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                        {
                                            backend.WaitForComplete(restoredb, null);
                                            return;
                                        }

                                        progress++;
                                        m_result.OperationProgressUpdater.UpdateProgress((((float)progress / lst.Count) * 0.1f) + 0.7f + (i * 0.1f));

                                        var volumeid = restoredb.GetRemoteVolumeID(sf.Name);

                                        restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Uploaded, sf.Size, sf.Hash, tr);

                                        // Update the block table so we know about the block/volume map
                                        foreach (var h in rd.Blocks)
                                        {
                                            restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr);
                                        }

                                        // Grab all known blocklists from the volume
                                        foreach (var blocklisthash in restoredb.GetBlockLists(volumeid))
                                        {
                                            restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr);
                                        }

                                        // Update tables so we know if we are done
                                        restoredb.FindMissingBlocklistHashes(hashsize, tr);

                                        using (new Logging.Timer("CommitRestoredBlocklist"))
                                            tr.Commit();

                                        //At this point we can patch files with data from the block volume
                                        if (blockprocessor != null)
                                        {
                                            blockprocessor(sf.Name, rd);
                                        }
                                    }
                        }
                    }

                    backend.WaitForComplete(restoredb, null);

                    //All done, we must verify that we have all blocklists fully intact
                    // if this fails, the db will not be deleted, so it can be used,
                    // except to continue a backup
                    restoredb.VerifyConsistency(null, m_options.Blocksize, m_options.BlockhashSize);
                }
        }
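A note on the name handling above: the recreate pass relies on VolumeBase.ParseFilename and VolumeBase.GenerateFilename to round-trip remote volume names, which is also what ProbeForMatchingFilename uses to build alternate compression/encryption candidates. Below is a minimal, hedged sketch of that round-trip; the sample filename is made up for illustration, and it assumes the Duplicati.Library.Main and Duplicati.Library.Main.Volumes namespaces are referenced.

        using System;
        using Duplicati.Library.Main;
        using Duplicati.Library.Main.Volumes;

        public static class ParseRoundTripDemo
        {
            public static void Main()
            {
                // A made-up dblock name in the usual <prefix>-b<guid>.dblock.<compression>.<encryption> shape
                var parsed = VolumeBase.ParseFilename("duplicati-b1234567890abcdef1234567890abcdef.dblock.zip.aes");
                if (parsed == null)
                    throw new Exception("Not a recognizable volume name");

                Console.WriteLine("{0} / {1} / {2} / {3}", parsed.Prefix, parsed.FileType, parsed.CompressionModule, parsed.EncryptionModule);

                // Regenerating the name with another compression module is how
                // ProbeForMatchingFilename builds its candidate names
                var candidate = VolumeBase.GenerateFilename(parsed.FileType, parsed.Prefix, parsed.Guid, parsed.Time, "7z", "aes");
                Console.WriteLine(candidate);
            }
        }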
Exemplo n.º 7
0
        public static int Run(List <string> args, Dictionary <string, string> options, Library.Utility.IFilter filter)
        {
            if (args.Count != 4)
            {
                Console.WriteLine("Invalid argument count ({0} expected 4): {1}{2}", args.Count, Environment.NewLine, string.Join(Environment.NewLine, args));
                return(100);
            }

            string target_compr_module = args[1];

            if (!Library.DynamicLoader.CompressionLoader.Keys.Contains(target_compr_module))
            {
                Console.WriteLine("Target compression module not found: {0}{1}Modules supported: {2}", args[1], Environment.NewLine, string.Join(", ", Library.DynamicLoader.CompressionLoader.Keys));
                return(100);
            }

            var m_Options = new Options(options);

            using (var backend = Library.DynamicLoader.BackendLoader.GetBackend(args[2], options))
            {
                if (backend == null)
                {
                    Console.WriteLine("Backend not found: {0}{1}Backends supported: {2}", args[2], Environment.NewLine, string.Join(", ", Library.DynamicLoader.BackendLoader.Keys));
                    return(100);
                }

                var targetfolder = Path.GetFullPath(args[3]);

                if (!Directory.Exists(args[3]))
                {
                    Console.WriteLine("Creating target folder: {0}", targetfolder);
                    Directory.CreateDirectory(targetfolder);
                }

                Console.WriteLine("Listing files on backend: {0} ...", backend.ProtocolKey);

                var rawlist = backend.List().ToList();

                Console.WriteLine("Found {0} files at remote storage", rawlist.Count);

                var i          = 0;
                var downloaded = 0;
                var errors     = 0;
                var needspass  = 0;

                var remotefiles =
                    (from x in rawlist
                     let n = VolumeBase.ParseFilename(x)
                     where n != null && n.Prefix == m_Options.Prefix
                     select n).ToArray(); //ToArray() ensures that we do not remote-request it multiple times

                if (remotefiles.Length == 0)
                {
                    if (rawlist.Count == 0)
                    {
                        Console.WriteLine("No files were found at the remote location, perhaps the target url is incorrect?");
                    }
                    else
                    {
                        var tmp =
                            (from x in rawlist
                             let n = VolumeBase.ParseFilename(x)
                             where n != null
                             select n.Prefix).ToArray();

                        var types = tmp.Distinct().ToArray();
                        if (tmp.Length == 0)
                        {
                            Console.WriteLine("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count);
                        }
                        else if (types.Length == 1)
                        {
                            Console.WriteLine("Found {0} parse-able files with the prefix {1}, did you forget to set the backup prefix?", tmp.Length, types[0]);
                        }
                        else
                        {
                            Console.WriteLine("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup prefix?", tmp.Length, rawlist.Count, string.Join(", ", types));
                        }
                    }

                    return(100);
                }

                bool reencrypt = Library.Utility.Utility.ParseBoolOption(options, "reencrypt");
                bool reupload  = Library.Utility.Utility.ParseBoolOption(options, "reupload");

                // Order matters: Files and Blocks first, Indexes last, because the index contents are adjusted to match the recompressed blocks
                var files   = remotefiles.Where(a => a.FileType == RemoteVolumeType.Files).ToArray();
                var blocks  = remotefiles.Where(a => a.FileType == RemoteVolumeType.Blocks).ToArray();
                var indexes = remotefiles.Where(a => a.FileType == RemoteVolumeType.Index).ToArray();

                remotefiles = files.Concat(blocks).Concat(indexes).ToArray();

                Console.WriteLine("Found {0} files which belongs to backup with prefix {1}", remotefiles.Count(), m_Options.Prefix);

                foreach (var remoteFile in remotefiles)
                {
                    try
                    {
                        Console.Write("{0}/{1}: {2}", ++i, remotefiles.Count(), remoteFile.File.Name);

                        var    localFileSource = Path.Combine(targetfolder, remoteFile.File.Name);
                        string localFileTarget;
                        string localFileSourceEncryption = "";

                        if (remoteFile.EncryptionModule != null)
                        {
                            if (string.IsNullOrWhiteSpace(m_Options.Passphrase))
                            {
                                needspass++;
                                Console.WriteLine(" - No passphrase supplied, skipping");
                                continue;
                            }

                            using (var m = Library.DynamicLoader.EncryptionLoader.GetModule(remoteFile.EncryptionModule, m_Options.Passphrase, options))
                                localFileSourceEncryption = m.FilenameExtension;

                            localFileSource = localFileSource.Substring(0, localFileSource.Length - localFileSourceEncryption.Length - 1);
                        }

                        if (remoteFile.CompressionModule != null)
                        {
                            localFileTarget = localFileSource.Substring(0, localFileSource.Length - remoteFile.CompressionModule.Length - 1) + "." + target_compr_module;
                        }
                        else
                        {
                            Console.WriteLine(" - cannot detect compression type");
                            continue;
                        }

                        if ((!reencrypt && File.Exists(localFileTarget)) || (reencrypt && File.Exists(localFileTarget + "." + localFileSourceEncryption)))
                        {
                            Console.WriteLine(" - target file already exist");
                            continue;
                        }

                        if (File.Exists(localFileSource))
                        {
                            File.Delete(localFileSource);
                        }

                        Console.Write(" - downloading ({0})...", Library.Utility.Utility.FormatSizeString(remoteFile.File.Size));

                        DateTime originLastWriteTime;
                        FileInfo destinationFileInfo;

                        using (var tf = new TempFile())
                        {
                            backend.Get(remoteFile.File.Name, tf);
                            originLastWriteTime = new FileInfo(tf).LastWriteTime;
                            downloaded++;

                            if (remoteFile.EncryptionModule != null)
                            {
                                Console.Write(" decrypting ...");
                                using (var m = Library.DynamicLoader.EncryptionLoader.GetModule(remoteFile.EncryptionModule, m_Options.Passphrase, options))
                                    using (var tf2 = new TempFile())
                                    {
                                        m.Decrypt(tf, tf2);
                                        File.Copy(tf2, localFileSource);
                                        File.Delete(tf2);
                                    }
                            }
                            else
                            {
                                File.Copy(tf, localFileSource);
                            }

                            File.Delete(tf);
                            destinationFileInfo = new FileInfo(localFileSource);
                            destinationFileInfo.LastWriteTime = originLastWriteTime;
                        }

                        if (remoteFile.CompressionModule != null)
                        {
                            Console.Write(" recompressing ...");

                            //Recompressing from e.g. zip to zip
                            if (localFileSource == localFileTarget)
                            {
                                File.Move(localFileSource, localFileSource + ".same");
                                localFileSource = localFileSource + ".same";
                            }
                            using (var localFileSourceStream = new System.IO.FileStream(localFileSource, FileMode.Open, FileAccess.Read, FileShare.Read))
                                using (var cmOld = Library.DynamicLoader.CompressionLoader.GetArchiveReader(remoteFile.CompressionModule, localFileSourceStream, options))
                                    using (var localFileTargetStream = new FileStream(localFileTarget, FileMode.Create, FileAccess.Write, FileShare.Delete))
                                        using (var cmNew = Library.DynamicLoader.CompressionLoader.GetArchiveWriter(target_compr_module, localFileTargetStream, options))
                                            foreach (var cmfile in cmOld.ListFiles(""))
                                            {
                                                string cmfileNew    = cmfile;
                                                var    cmFileVolume = VolumeBase.ParseFilename(cmfileNew);

                                                if (remoteFile.FileType == RemoteVolumeType.Index && cmFileVolume != null && cmFileVolume.FileType == RemoteVolumeType.Blocks)
                                                {
                                                    // Correct inner filename extension to target compression type
                                                    cmfileNew = cmfileNew.Replace("." + cmFileVolume.CompressionModule, "." + target_compr_module);
                                                    if (!reencrypt)
                                                    {
                                                        cmfileNew = cmfileNew.Replace("." + cmFileVolume.EncryptionModule, "");
                                                    }

                                                    //Because recompression changes the block file's size and hash, the index entry needs to be updated
                                                    string textJSON;
                                                    using (var sourceStream = cmOld.OpenRead(cmfile))
                                                        using (var sourceStreamReader = new StreamReader(sourceStream))
                                                        {
                                                            textJSON = sourceStreamReader.ReadToEnd();
                                                            JToken token          = JObject.Parse(textJSON);
                                                            var    fileInfoBlocks = new FileInfo(Path.Combine(targetfolder, cmfileNew.Replace("vol/", "")));
                                                            var    filehasher     = HashAlgorithmHelper.Create(m_Options.FileHashAlgorithm);

                                                            using (var fileStream = fileInfoBlocks.Open(FileMode.Open))
                                                            {
                                                                fileStream.Position = 0;
                                                                token["volumehash"] = Convert.ToBase64String(filehasher.ComputeHash(fileStream));
                                                                fileStream.Close();
                                                            }

                                                            token["volumesize"] = fileInfoBlocks.Length;
                                                            textJSON            = token.ToString();
                                                        }

                                                    using (var sourceStream = new MemoryStream(System.Text.Encoding.UTF8.GetBytes(textJSON)))
                                                        using (var cs = cmNew.CreateFile(cmfileNew, Library.Interface.CompressionHint.Compressible, cmOld.GetLastWriteTime(cmfile)))
                                                            Library.Utility.Utility.CopyStream(sourceStream, cs);
                                                }
                                                else
                                                {
                                                    using (var sourceStream = cmOld.OpenRead(cmfile))
                                                        using (var cs = cmNew.CreateFile(cmfileNew, Library.Interface.CompressionHint.Compressible, cmOld.GetLastWriteTime(cmfile)))
                                                            Library.Utility.Utility.CopyStream(sourceStream, cs);
                                                }
                                            }

                            File.Delete(localFileSource);
                            destinationFileInfo = new FileInfo(localFileTarget);
                            destinationFileInfo.LastWriteTime = originLastWriteTime;
                        }

                        if (reencrypt && remoteFile.EncryptionModule != null)
                        {
                            Console.Write(" reencrypting ...");
                            using (var m = Library.DynamicLoader.EncryptionLoader.GetModule(remoteFile.EncryptionModule, m_Options.Passphrase, options))
                            {
                                m.Encrypt(localFileTarget, localFileTarget + "." + localFileSourceEncryption);
                                File.Delete(localFileTarget);
                                localFileTarget = localFileTarget + "." + localFileSourceEncryption;
                            }

                            destinationFileInfo = new FileInfo(localFileTarget);
                            destinationFileInfo.LastWriteTime = originLastWriteTime;
                        }

                        if (reupload)
                        {
                            Console.Write(" reuploading ...");
                            backend.Put((new FileInfo(localFileTarget)).Name, localFileTarget);
                            backend.Delete(remoteFile.File.Name);
                            File.Delete(localFileTarget);
                        }

                        Console.WriteLine(" done!");
                    }
                    catch (Exception ex)
                    {
                        Console.WriteLine(" error: {0}", ex);
                        errors++;
                    }
                }

                if (reupload)
                {
                    var remoteverificationfileexist = rawlist.Any(x => x.Name == (m_Options.Prefix + "-verification.json"));

                    if (remoteverificationfileexist)
                    {
                        Console.WriteLine("Found verification file {0} - deleting", m_Options.Prefix + "-verification.json");
                        backend.Delete(m_Options.Prefix + "-verification.json");
                    }
                }

                if (needspass > 0 && downloaded == 0)
                {
                    Console.WriteLine("No files downloaded, try adding --passphrase to decrypt files");
                    return(100);
                }

                Console.WriteLine("Download complete, of {0} remote files, {1} were downloaded with {2} errors", remotefiles.Count(), downloaded, errors);
                if (needspass > 0)
                {
                    Console.WriteLine("Additonally {0} remote files were skipped because of encryption, supply --passphrase to download those");
                }

                if (errors > 0)
                {
                    Console.WriteLine("There were errors during recompress of remote backend files!");
                    return(200);
                }

                return(0);
            }
        }
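One detail of the example above is worth isolating: recompressing a dblock changes both its size and its hash, so the "volumehash" and "volumesize" fields embedded in the matching dindex entry must be recomputed before the index is rewritten. The following is a standalone, hedged sketch of just that fix-up using plain .NET hashing and Newtonsoft.Json; SHA-256 is assumed as the file hash algorithm here, whereas the example reads it from m_Options.FileHashAlgorithm.

        using System;
        using System.IO;
        using System.Security.Cryptography;
        using Newtonsoft.Json.Linq;

        public static class IndexFixup
        {
            // Returns the updated JSON text for one volume entry of a dindex file
            public static string UpdateVolumeEntry(string indexEntryJson, string recompressedBlockFile)
            {
                JToken token = JObject.Parse(indexEntryJson);
                var info = new FileInfo(recompressedBlockFile);

                // Recompute the hash over the recompressed dblock on disk
                using (var hasher = SHA256.Create())
                using (var fs = info.OpenRead())
                    token["volumehash"] = Convert.ToBase64String(hasher.ComputeHash(fs));

                token["volumesize"] = info.Length;
                return token.ToString();
            }
        }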
Exemplo n.º 8
0
        internal bool DoCompact(LocalDeleteDatabase db, bool hasVerifiedBackend, ref System.Data.IDbTransaction transaction, BackendManager sharedBackend)
        {
            var report = db.GetCompactReport(m_options.VolumeSize, m_options.Threshold, m_options.SmallFileSize, m_options.SmallFileMaxCount, transaction);

            report.ReportCompactData();

            if (report.ShouldReclaim || report.ShouldCompact)
            {
                // Workaround where we allow a running backendmanager to be used
                using (var bk = sharedBackend == null ? new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db) : null)
                {
                    var backend = bk ?? sharedBackend;
                    if (!hasVerifiedBackend && !m_options.NoBackendverification)
                    {
                        FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter);
                    }

                    BlockVolumeWriter newvol = new BlockVolumeWriter(m_options);
                    newvol.VolumeID = db.RegisterRemoteVolume(newvol.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, transaction);

                    IndexVolumeWriter newvolindex = null;
                    if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
                    {
                        newvolindex          = new IndexVolumeWriter(m_options);
                        newvolindex.VolumeID = db.RegisterRemoteVolume(newvolindex.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, transaction);
                        db.AddIndexBlockLink(newvolindex.VolumeID, newvol.VolumeID, transaction);
                    }

                    long   blocksInVolume = 0;
                    byte[] buffer         = new byte[m_options.Blocksize];
                    var    remoteList     = db.GetRemoteVolumes().Where(n => n.State == RemoteVolumeState.Uploaded || n.State == RemoteVolumeState.Verified).ToArray();

                    //These are for bookkeeping
                    var uploadedVolumes   = new List <KeyValuePair <string, long> >();
                    var deletedVolumes    = new List <KeyValuePair <string, long> >();
                    var downloadedVolumes = new List <KeyValuePair <string, long> >();

                    //We start by deleting unused volumes to save space before uploading new stuff
                    var fullyDeleteable = (from v in remoteList
                                           where report.DeleteableVolumes.Contains(v.Name)
                                           select (IRemoteVolume)v).ToList();
                    deletedVolumes.AddRange(DoDelete(db, backend, fullyDeleteable, ref transaction));

                    // This list is used to pick up unused volumes,
                    // so they can be deleted once the upload of the
                    // required fragments is complete
                    var deleteableVolumes = new List <IRemoteVolume>();

                    if (report.ShouldCompact)
                    {
                        newvolindex?.StartVolume(newvol.RemoteFilename);
                        var volumesToDownload = (from v in remoteList
                                                 where report.CompactableVolumes.Contains(v.Name)
                                                 select (IRemoteVolume)v).ToList();

                        using (var q = db.CreateBlockQueryHelper(transaction))
                        {
                            foreach (var entry in new AsyncDownloader(volumesToDownload, backend))
                            {
                                using (var tmpfile = entry.TempFile)
                                {
                                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                    {
                                        backend.WaitForComplete(db, transaction);
                                        return(false);
                                    }

                                    downloadedVolumes.Add(new KeyValuePair <string, long>(entry.Name, entry.Size));
                                    var inst = VolumeBase.ParseFilename(entry.Name);
                                    using (var f = new BlockVolumeReader(inst.CompressionModule, tmpfile, m_options))
                                    {
                                        foreach (var e in f.Blocks)
                                        {
                                            if (q.UseBlock(e.Key, e.Value, transaction))
                                            {
                                                //TODO: How do we get the compression hint? Reverse query for filename in db?
                                                var s = f.ReadBlock(e.Key, buffer);
                                                if (s != e.Value)
                                                {
                                                    throw new Exception(string.Format("Size mismatch problem for block {0}, {1} vs {2}", e.Key, s, e.Value));
                                                }

                                                newvol.AddBlock(e.Key, buffer, 0, s, Duplicati.Library.Interface.CompressionHint.Compressible);
                                                if (newvolindex != null)
                                                {
                                                    newvolindex.AddBlock(e.Key, e.Value);
                                                }

                                                db.MoveBlockToNewVolume(e.Key, e.Value, newvol.VolumeID, transaction);
                                                blocksInVolume++;

                                                if (newvol.Filesize > m_options.VolumeSize)
                                                {
                                                    uploadedVolumes.Add(new KeyValuePair <string, long>(newvol.RemoteFilename, newvol.Filesize));
                                                    if (newvolindex != null)
                                                    {
                                                        uploadedVolumes.Add(new KeyValuePair <string, long>(newvolindex.RemoteFilename, newvolindex.Filesize));
                                                    }

                                                    if (!m_options.Dryrun)
                                                    {
                                                        backend.Put(newvol, newvolindex);
                                                    }
                                                    else
                                                    {
                                                        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadGeneratedBlockset", "Would upload generated blockset of size {0}", Library.Utility.Utility.FormatSizeString(newvol.Filesize));
                                                    }


                                                    newvol          = new BlockVolumeWriter(m_options);
                                                    newvol.VolumeID = db.RegisterRemoteVolume(newvol.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, transaction);

                                                    if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
                                                    {
                                                        newvolindex          = new IndexVolumeWriter(m_options);
                                                        newvolindex.VolumeID = db.RegisterRemoteVolume(newvolindex.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, transaction);
                                                        db.AddIndexBlockLink(newvolindex.VolumeID, newvol.VolumeID, transaction);
                                                        newvolindex.StartVolume(newvol.RemoteFilename);
                                                    }

                                                    blocksInVolume = 0;

                                                    //After we upload this volume, we can delete all previously encountered volumes
                                                    deletedVolumes.AddRange(DoDelete(db, backend, deleteableVolumes, ref transaction));
                                                    deleteableVolumes = new List <IRemoteVolume>();
                                                }
                                            }
                                        }
                                    }

                                    deleteableVolumes.Add(entry);
                                }
                            }

                            if (blocksInVolume > 0)
                            {
                                uploadedVolumes.Add(new KeyValuePair <string, long>(newvol.RemoteFilename, newvol.Filesize));
                                if (newvolindex != null)
                                {
                                    uploadedVolumes.Add(new KeyValuePair <string, long>(newvolindex.RemoteFilename, newvolindex.Filesize));
                                }
                                if (!m_options.Dryrun)
                                {
                                    backend.Put(newvol, newvolindex);
                                }
                                else
                                {
                                    Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadGeneratedBlockset", "Would upload generated blockset of size {0}", Library.Utility.Utility.FormatSizeString(newvol.Filesize));
                                }
                            }
                            else
                            {
                                db.RemoveRemoteVolume(newvol.RemoteFilename, transaction);
                                if (newvolindex != null)
                                {
                                    db.RemoveRemoteVolume(newvolindex.RemoteFilename, transaction);
                                    newvolindex.FinishVolume(null, 0);
                                }
                            }
                        }
                    }
                    else
                    {
                        newvolindex?.Dispose();
                        newvol.Dispose();
                    }

                    deletedVolumes.AddRange(DoDelete(db, backend, deleteableVolumes, ref transaction));

                    var downloadSize = downloadedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value);
                    var deletedSize  = deletedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value);
                    var uploadSize   = uploadedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a, x) => a + x.Value);

                    m_result.DeletedFileCount    = deletedVolumes.Count;
                    m_result.DownloadedFileCount = downloadedVolumes.Count;
                    m_result.UploadedFileCount   = uploadedVolumes.Count;
                    m_result.DeletedFileSize     = deletedSize;
                    m_result.DownloadedFileSize  = downloadSize;
                    m_result.UploadedFileSize    = uploadSize;
                    m_result.Dryrun = m_options.Dryrun;

                    if (m_result.Dryrun)
                    {
                        if (downloadedVolumes.Count == 0)
                        {
                            Logging.Log.WriteDryrunMessage(LOGTAG, "CompactResults", "Would delete {0} files, which would reduce storage by {1}", m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize));
                        }
                        else
                        {
                            Logging.Log.WriteDryrunMessage(LOGTAG, "CompactResults", "Would download {0} file(s) with a total size of {1}, delete {2} file(s) with a total size of {3}, and compact to {4} file(s) with a size of {5}, which would reduce storage by {6} file(s) and {7}",
                                                           m_result.DownloadedFileCount,
                                                           Library.Utility.Utility.FormatSizeString(m_result.DownloadedFileSize),
                                                           m_result.DeletedFileCount,
                                                           Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize), m_result.UploadedFileCount,
                                                           Library.Utility.Utility.FormatSizeString(m_result.UploadedFileSize),
                                                           m_result.DeletedFileCount - m_result.UploadedFileCount,
                                                           Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize - m_result.UploadedFileSize));
                        }
                    }
                    else
                    {
                        if (m_result.DownloadedFileCount == 0)
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "CompactResults", "Deleted {0} files, which reduced storage by {1}", m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize));
                        }
                        else
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "CompactResults", "Downloaded {0} file(s) with a total size of {1}, deleted {2} file(s) with a total size of {3}, and compacted to {4} file(s) with a size of {5}, which reduced storage by {6} file(s) and {7}",
                                                                m_result.DownloadedFileCount,
                                                                Library.Utility.Utility.FormatSizeString(downloadSize),
                                                                m_result.DeletedFileCount,
                                                                Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize),
                                                                m_result.UploadedFileCount,
                                                                Library.Utility.Utility.FormatSizeString(m_result.UploadedFileSize),
                                                                m_result.DeletedFileCount - m_result.UploadedFileCount,
                                                                Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize - m_result.UploadedFileSize));
                        }
                    }

                    backend.WaitForComplete(db, transaction);
                }

                m_result.EndTime = DateTime.UtcNow;
                return((m_result.DeletedFileCount + m_result.UploadedFileCount) > 0);
            }
            else
            {
                m_result.EndTime = DateTime.UtcNow;
                return(false);
            }
        }
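For context on when DoCompact actually does work: the report it consumes distinguishes reclaiming (deleting volumes that no longer hold any referenced blocks) from compacting (rewriting volumes once enough of their content is wasted, or once too many undersized volumes accumulate). The sketch below is a simplified, hypothetical restatement of that decision; the member names are illustrative and not Duplicati's actual compact-report type.

        public class CompactDecision
        {
            public bool ShouldReclaim;
            public bool ShouldCompact;

            public static CompactDecision Evaluate(
                long wastedBytes, long totalBytes, double threshold,
                int smallVolumeCount, int smallVolumeMaxCount, int fullyUnusedVolumes)
            {
                return new CompactDecision
                {
                    // Volumes with no referenced blocks can simply be deleted
                    ShouldReclaim = fullyUnusedVolumes > 0,

                    // Rewriting pays off once enough space is wasted,
                    // or when many undersized volumes should be merged
                    ShouldCompact =
                        (totalBytes > 0 && (double)wastedBytes / totalBytes > threshold)
                        || smallVolumeCount > smallVolumeMaxCount
                };
            }
        }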
Exemplo n.º 9
0
        public async Task FilesetFiles()
        {
            // Choose a dblock size that is small enough so that more than one volume is needed.
            Dictionary <string, string> options = new Dictionary <string, string>(this.TestOptions)
            {
                ["dblock-size"] = "10mb",

                // This allows us to inspect the dlist files without needing the BackendManager (which is inaccessible here) to decrypt them.
                ["no-encryption"] = "true"
            };

            // Run a full backup.
            using (Controller c = new Controller("file://" + this.TARGETFOLDER, options, null))
            {
                IBackupResults backupResults = c.Backup(new[] { this.DATAFOLDER });
                Assert.AreEqual(0, backupResults.Errors.Count());
                Assert.AreEqual(0, backupResults.Warnings.Count());
            }

            // Run a partial backup.
            using (Controller c = new Controller("file://" + this.TARGETFOLDER, options, null))
            {
                IBackupResults backupResults = await this.RunPartialBackup(c).ConfigureAwait(false);

                Assert.AreEqual(0, backupResults.Errors.Count());
                Assert.AreEqual(1, backupResults.Warnings.Count());
            }

            Dictionary <DateTime, int> GetBackupTypesFromRemoteFiles(Controller c, out List <string> filelistFiles)
            {
                Dictionary <DateTime, int> map = new Dictionary <DateTime, int>();

                filelistFiles = new List <string>();

                IListRemoteResults remoteFiles = c.ListRemote();

                foreach (IFileEntry file in remoteFiles.Files)
                {
                    IParsedVolume volume = VolumeBase.ParseFilename(file);
                    if (volume != null && volume.FileType == RemoteVolumeType.Files)
                    {
                        string dlistFile = Path.Combine(this.TARGETFOLDER, volume.File.Name);
                        filelistFiles.Add(dlistFile);
                        VolumeBase.FilesetData filesetData = VolumeReaderBase.GetFilesetData(volume.CompressionModule, dlistFile, new Options(options));
                        map[volume.Time] = filesetData.IsFullBackup ? BackupType.FULL_BACKUP : BackupType.PARTIAL_BACKUP;
                    }
                }

                return(map);
            }

            // Purge a file and verify that the fileset file exists in the new dlist files.
            List <string> dlistFiles;
            Dictionary <DateTime, int> backupTypeMap;

            using (Controller c = new Controller("file://" + this.TARGETFOLDER, options, null))
            {
                IPurgeFilesResults purgeResults = c.PurgeFiles(new Library.Utility.FilterExpression($"*{this.fileSizes[0]}*"));
                Assert.AreEqual(0, purgeResults.Errors.Count());
                Assert.AreEqual(0, purgeResults.Warnings.Count());

                List <IListResultFileset> filesets = c.List().Filesets.ToList();
                Assert.AreEqual(2, filesets.Count);
                Assert.AreEqual(BackupType.FULL_BACKUP, filesets.Single(x => x.Version == 1).IsFullBackup);
                Assert.AreEqual(BackupType.PARTIAL_BACKUP, filesets.Single(x => x.Version == 0).IsFullBackup);

                backupTypeMap = GetBackupTypesFromRemoteFiles(c, out dlistFiles);
            }

            int[] backupTypes = backupTypeMap.OrderByDescending(x => x.Key).Select(x => x.Value).ToArray();
            Assert.AreEqual(2, backupTypes.Length);
            Assert.AreEqual(BackupType.FULL_BACKUP, backupTypes[1]);
            Assert.AreEqual(BackupType.PARTIAL_BACKUP, backupTypes[0]);

            // Remove the dlist files.
            foreach (string dlistFile in dlistFiles)
            {
                File.Delete(dlistFile);
            }

            // Run a repair and verify that the fileset file exists in the new dlist files.
            using (Controller c = new Controller("file://" + this.TARGETFOLDER, options, null))
            {
                IRepairResults repairResults = c.Repair();
                Assert.AreEqual(0, repairResults.Errors.Count());
                Assert.AreEqual(0, repairResults.Warnings.Count());

                List <IListResultFileset> filesets = c.List().Filesets.ToList();
                Assert.AreEqual(2, filesets.Count);
                Assert.AreEqual(BackupType.FULL_BACKUP, filesets.Single(x => x.Version == 1).IsFullBackup);
                Assert.AreEqual(BackupType.PARTIAL_BACKUP, filesets.Single(x => x.Version == 0).IsFullBackup);

                backupTypeMap = GetBackupTypesFromRemoteFiles(c, out _);
            }

            backupTypes = backupTypeMap.OrderByDescending(x => x.Key).Select(x => x.Value).ToArray();
            Assert.AreEqual(2, backupTypes.Length);
            Assert.AreEqual(BackupType.FULL_BACKUP, backupTypes[1]);
            Assert.AreEqual(BackupType.PARTIAL_BACKUP, backupTypes[0]);
        }
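The dlist selection inside GetBackupTypesFromRemoteFiles mirrors the query the recreate handler uses: parse every remote name and keep only Files-type volumes. A compact restatement of that query, ordered newest-first as in the recreate code; it assumes the same Duplicati interfaces used in the examples on this page.

        using System.Collections.Generic;
        using System.Linq;
        using Duplicati.Library.Interface;
        using Duplicati.Library.Main;
        using Duplicati.Library.Main.Volumes;

        public static class DlistSelector
        {
            public static List<IParsedVolume> SelectFilelists(IEnumerable<IFileEntry> remoteFiles)
            {
                // Non-volume files parse to null and are dropped by the where clause
                return (from f in remoteFiles
                        let v = VolumeBase.ParseFilename(f)
                        where v != null && v.FileType == RemoteVolumeType.Files
                        orderby v.Time descending
                        select v).ToList();
            }
        }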
Exemplo n.º 10
0
        public static Task Run(BackupDatabase database, Options options, BackupResults result, ITaskReader taskreader, string lasttempfilelist, long lasttempfileid)
        {
            return(AutomationExtensions.RunTask(new
            {
                UploadChannel = Channels.BackendRequest.ForWrite
            },

                                                async self =>
            {
                // Check if we should upload a synthetic filelist
                if (options.DisableSyntheticFilelist || string.IsNullOrWhiteSpace(lasttempfilelist) || lasttempfileid < 0)
                {
                    return;
                }

                // Check that we still need to process this after the cleanup has performed its duties
                var syntbase = await database.GetRemoteVolumeFromIDAsync(lasttempfileid);

                // If we do not have a valid entry, warn and quit
                if (syntbase.Name == null || syntbase.State == RemoteVolumeState.Uploaded)
                {
                    // TODO: If the repair succeeds, this could give a false warning?
                    Logging.Log.WriteWarningMessage(LOGTAG, "MissingTemporaryFilelist", null, "Expected there to be a temporary fileset for synthetic filelist ({0}, {1}), but none was found?", lasttempfileid, lasttempfilelist);
                    return;
                }

                // The fileset is missing or was repaired
                if (syntbase.Name == null || (syntbase.State != RemoteVolumeState.Uploading && syntbase.State != RemoteVolumeState.Temporary))
                {
                    Logging.Log.WriteInformationMessage(LOGTAG, "SkippingSyntheticListUpload", "Skipping synthetic upload because temporary fileset appers to be complete: ({0}, {1}, {2})", lasttempfileid, lasttempfilelist, syntbase.State);
                    return;
                }

                // Ready to build and upload the synthetic list
                await database.CommitTransactionAsync("PreSyntheticFilelist");
                var incompleteFilesets = (await database.GetIncompleteFilesetsAsync()).OrderBy(x => x.Value).ToList();

                result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreviousBackupFinalize);
                Logging.Log.WriteInformationMessage(LOGTAG, "PreviousBackupFilelistUpload", "Uploading filelist from previous interrupted backup");

                if (!await taskreader.ProgressAsync)
                {
                    return;
                }

                var incompleteSet = incompleteFilesets.Last();
                var badIds = from n in incompleteFilesets select n.Key;

                var prevs = (from n in await database.GetFilesetTimesAsync()
                             where n.Key < incompleteSet.Key && !badIds.Contains(n.Key)
                             orderby n.Key
                             select n.Key).ToArray();

                var prevId = prevs.Length == 0 ? -1 : prevs.Last();

                FilesetVolumeWriter fsw = null;
                try
                {
                    var s = 1;
                    var fileTime = incompleteSet.Value + TimeSpan.FromSeconds(s);
                    var oldFilesetID = incompleteSet.Key;

                    // Probe for an unused filename
                    while (s < 60)
                    {
                        var id = await database.GetRemoteVolumeIDAsync(VolumeBase.GenerateFilename(RemoteVolumeType.Files, options, null, fileTime));
                        if (id < 0)
                        {
                            break;
                        }

                        fileTime = incompleteSet.Value + TimeSpan.FromSeconds(++s);
                    }

                    fsw = new FilesetVolumeWriter(options, fileTime);
                    fsw.VolumeID = await database.RegisterRemoteVolumeAsync(fsw.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary);

                    if (!string.IsNullOrEmpty(options.ControlFiles))
                    {
                        foreach (var p in options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries))
                        {
                            fsw.AddControlFile(p, options.GetCompressionHintFromFilename(p));
                        }
                    }

                    var newFilesetID = await database.CreateFilesetAsync(fsw.VolumeID, fileTime);
                    await database.LinkFilesetToVolumeAsync(newFilesetID, fsw.VolumeID);
                    await database.AppendFilesFromPreviousSetAsync(null, newFilesetID, prevId, fileTime);

                    await database.WriteFilesetAsync(fsw, newFilesetID);

                    if (!await taskreader.ProgressAsync)
                    {
                        return;
                    }

                    await database.UpdateRemoteVolumeAsync(fsw.RemoteFilename, RemoteVolumeState.Uploading, -1, null);
                    await database.CommitTransactionAsync("CommitUpdateFilelistVolume");
                    await self.UploadChannel.WriteAsync(new FilesetUploadRequest(fsw));
                    fsw = null;
                }
                catch
                {
                    await database.RollbackTransactionAsync();
                    throw;
                }
                finally
                {
                    if (fsw != null)
                    {
                        try { fsw.Dispose(); }
                        catch { fsw = null; }
                    }
                }
            }
                                                ));
        }
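The filename probing above works because GenerateFilename derives dlist names from the fileset timestamp, so a collision with an existing remote name is resolved by bumping the time one second at a time, giving up after a minute. The same loop isolated as a hedged sketch; the nameInUse delegate stands in for the database.GetRemoteVolumeIDAsync lookup.

        using System;

        public static class FileTimeProbe
        {
            // nameInUse should return true while the name generated for the given time already exists
            public static DateTime FindUnusedTime(DateTime baseTime, Func<DateTime, bool> nameInUse)
            {
                var s = 1;
                var fileTime = baseTime.AddSeconds(s);

                // Mirrors the probing loop in the example: bump by one second, at most 60 tries
                while (s < 60 && nameInUse(fileTime))
                    fileTime = baseTime.AddSeconds(++s);

                return fileTime;
            }
        }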
Exemplo n.º 11
0
        public static int Run(List <string> args, Dictionary <string, string> options, Library.Utility.IFilter filter)
        {
            if (args.Count != 4)
            {
                Console.WriteLine("Invalid argument count ({0} expected 4): {1}{2}", args.Count, Environment.NewLine, string.Join(Environment.NewLine, args));
                return(100);
            }

            string target_compr_module = args[1];

            if (!Library.DynamicLoader.CompressionLoader.Keys.Contains(target_compr_module))
            {
                Console.WriteLine("Target compression module not found: {0}{1}Modules supported: {2}", args[1], Environment.NewLine, string.Join(", ", Library.DynamicLoader.CompressionLoader.Keys));
                return(100);
            }

            var m_Options = new Options(options);

            using (var backend = Library.DynamicLoader.BackendLoader.GetBackend(args[2], options))
            {
                if (backend == null)
                {
                    Console.WriteLine("Backend not found: {0}{1}Backends supported: {2}", args[2], Environment.NewLine, string.Join(", ", Library.DynamicLoader.BackendLoader.Keys));
                    return(100);
                }

                var targetfolder = Path.GetFullPath(args[3]);

                if (!Directory.Exists(targetfolder))
                {
                    Console.WriteLine("Creating target folder: {0}", targetfolder);
                    Directory.CreateDirectory(targetfolder);
                }

                Console.WriteLine("Listing files on backend: {0} ...", backend.ProtocolKey);

                var rawlist = backend.List();

                Console.WriteLine("Found {0} files", rawlist.Count);

                var i          = 0;
                var downloaded = 0;
                var errors     = 0;
                var needspass  = 0;

                var remotefiles =
                    (from x in rawlist
                     let n = VolumeBase.ParseFilename(x)
                     where n != null && n.Prefix == m_Options.Prefix
                     select n).ToArray(); // ToArray() ensures that we do not issue the remote request multiple times

                if (remotefiles.Length == 0)
                {
                    if (rawlist.Count == 0)
                    {
                        Console.WriteLine("No files were found at the remote location, perhaps the target url is incorrect?");
                    }
                    else
                    {
                        var tmp =
                            (from x in rawlist
                             let n = VolumeBase.ParseFilename(x)
                             where n != null
                             select n.Prefix).ToArray();

                        var types = tmp.Distinct().ToArray();
                        if (tmp.Length == 0)
                        {
                            Console.WriteLine("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count);
                        }
                        else if (types.Length == 1)
                        {
                            Console.WriteLine("Found {0} parse-able files with the prefix {1}, did you forget to set the backup-prefix?", tmp.Length, types[0]);
                        }
                        else
                        {
                            Console.WriteLine("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup-prefix?", tmp.Length, rawlist.Count, string.Join(", ", types));
                        }
                    }

                    return(100);
                }

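                // For each parseable remote volume: download, decrypt if needed,
                // rewrite the archive with the target compression module, then
                // optionally re-encrypt and re-upload depending on the options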
                foreach (var entry in remotefiles)
                {
                    try
                    {
                        Console.Write("{0}: {1}", i, entry.File.Name);

                        var local = Path.Combine(targetfolder, entry.File.Name);
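                        // Encrypted volumes are decrypted below, so drop the trailing
                        // encryption extension (e.g. ".aes") from the local filename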
                        if (entry.EncryptionModule != null)
                        {
                            if (string.IsNullOrWhiteSpace(m_Options.Passphrase))
                            {
                                needspass++;
                                Console.WriteLine(" - No passphrase supplied, skipping");
                                continue;
                            }

                            local = local.Substring(0, local.Length - entry.EncryptionModule.Length - 1);
                        }

                        if (entry.CompressionModule == target_compr_module)
                        {
                            Console.WriteLine(" - compression types are same");
                            continue;
                        }

                        string localNew;

                        if (entry.CompressionModule != null)
                        {
                            localNew = local.Substring(0, local.Length - entry.CompressionModule.Length - 1) + "." + target_compr_module;

                            if (File.Exists(localNew))
                            {
                                Console.WriteLine(" - target file already exist");
                                continue;
                            }
                        }
                        else
                        {
                            Console.WriteLine(" - cannot detect compression type");
                            continue;
                        }

                        if (File.Exists(local))
                        {
                            File.Delete(local);
                        }

                        Console.Write(" - downloading ({0})...", Library.Utility.Utility.FormatSizeString(entry.File.Size));

                        using (var tf = new Library.Utility.TempFile())
                        {
                            backend.Get(entry.File.Name, tf);
                            downloaded++;

                            if (entry.EncryptionModule != null)
                            {
                                Console.Write(" - decrypting ...");
                                using (var m = Library.DynamicLoader.EncryptionLoader.GetModule(entry.EncryptionModule, m_Options.Passphrase, options))
                                    using (var tf2 = new Library.Utility.TempFile())
                                    {
                                        m.Decrypt(tf, tf2);
                                        File.Copy(tf2, local);
                                        File.Delete(tf2);
                                    }
                            }
                            else
                            {
                                File.Copy(tf, local);
                            }

                            File.Delete(tf);
                        }

                        options.TryGetValue("reupload", out var reupload);

                        if (entry.CompressionModule != null)
                        {
                            Console.Write(" - recompressing ...");

                            using (var cmOld = Library.DynamicLoader.CompressionLoader.GetModule(entry.CompressionModule, local, options))
                            {
                                using (var cmNew = Library.DynamicLoader.CompressionLoader.GetModule(target_compr_module, localNew, options))
                                {
                                    foreach (var cmfile in cmOld.ListFiles(""))
                                    {
                                        using (var sourceStream = cmOld.OpenRead(cmfile))
                                            using (var cs = cmNew.CreateFile(cmfile, Duplicati.Library.Interface.CompressionHint.Compressible, cmOld.GetLastWriteTime(cmfile)))
                                                Library.Utility.Utility.CopyStream(sourceStream, cs);
                                    }
                                }
                            }

                            File.Delete(local);
                        }

                        options.TryGetValue("reencrypt", out var reencrypt);

                        if (reencrypt == "true" && entry.EncryptionModule != null)
                        {
                            Console.Write(" - reencrypting ...");
                            using (var m = Library.DynamicLoader.EncryptionLoader.GetModule(entry.EncryptionModule, m_Options.Passphrase, options))
                            {
                                m.Encrypt(localNew, localNew + "." + m.FilenameExtension);
                                File.Delete(localNew);
                                localNew = localNew + "." + m.FilenameExtension;
                            }
                        }

                        if (reupload == "true")
                        {
                            backend.Put(new FileInfo(localNew).Name, localNew);
                            backend.Delete(entry.File.Name);
                            File.Delete(localNew);
                        }

                        Console.WriteLine(" done!");
                    }
                    catch (Exception ex)
                    {
                        Console.WriteLine(" error: {0}", ex.ToString());
                        errors++;
                    }
                }

                if (needspass > 0 && downloaded == 0)
                {
                    Console.WriteLine("No files downloaded, try adding --passphrase to decrypt files");
                    return(100);
                }

                Console.WriteLine("Download complete, of {0} remote files, {1} were downloaded with {2} errors", remotefiles.Count(), downloaded, errors);
                if (needspass > 0)
                {
                    Console.WriteLine("Additonally {0} remote files were skipped because of encryption, supply --passphrase to download those");
                }

                if (errors > 0)
                {
                    return(200);
                }
                else
                {
                    return(0);
                }
            }
        }
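The recompression loop in the middle of this example is the core of the tool: every entry is streamed out of the source archive and into a fresh archive written by the target module, preserving per-entry timestamps. Factored into a standalone helper, it looks roughly like this (a sketch reusing only the loader API shown above; the RecompressArchive name and its parameters are mine):

        private static void RecompressArchive(string sourcePath, string targetPath, string sourceModule, string targetModule, Dictionary<string, string> options)
        {
            using (var cmOld = Library.DynamicLoader.CompressionLoader.GetModule(sourceModule, sourcePath, options))
            using (var cmNew = Library.DynamicLoader.CompressionLoader.GetModule(targetModule, targetPath, options))
            {
                // Copy each entry verbatim, keeping its last-write time and
                // hinting to the new module that the payload is compressible
                foreach (var cmfile in cmOld.ListFiles(""))
                {
                    using (var source = cmOld.OpenRead(cmfile))
                    using (var target = cmNew.CreateFile(cmfile, Duplicati.Library.Interface.CompressionHint.Compressible, cmOld.GetLastWriteTime(cmfile)))
                        Library.Utility.Utility.CopyStream(source, target);
                }
            }
        }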