        /// <summary>
        /// Run the recreate procedure
        /// </summary>
        /// <param name="dbparent">The database to restore into</param>
        /// <param name="filter">Filters the files in a filelist to prevent downloading unwanted data</param>
        /// <param name="filelistfilter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param>
        /// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be used to recover data blocks while processing blocklists</param>
        internal void DoRun(LocalDatabase dbparent, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null)
        {
            var hashalg = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
            if (hashalg == null)
                throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.BlockHashAlgorithm));
            var hashsize = hashalg.HashSize / 8;

            //We build a local database in steps.
            using(var restoredb = new LocalRecreateDatabase(dbparent, m_options))
            using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, restoredb))
            {
                var volumeIds = new Dictionary<string, long>();

                var rawlist = backend.List();

                //First step is to examine the remote storage to see what
                // kind of data we can find
                var remotefiles =
                    (from x in rawlist
                     let n = VolumeBase.ParseFilename(x)
                     where n != null && n.Prefix == m_options.Prefix
                     select n).ToArray(); //ToArray() ensures that we do not remote-request it multiple times

                if (remotefiles.Length == 0)
                {
                    if (rawlist.Count == 0)
                        throw new Exception("No files were found at the remote location, perhaps the target url is incorrect?");
                    else
                    {
                        var tmp =
                            (from x in rawlist
                             let n = VolumeBase.ParseFilename(x)
                             where n != null
                             select n.Prefix).ToArray();

                        var types = tmp.Distinct().ToArray();
                        if (tmp.Length == 0)
                            throw new Exception(string.Format("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count));
                        else if (types.Length == 1)
                            throw new Exception(string.Format("Found {0} parse-able files with the prefix {1}, did you forget to set the backup-prefix?", tmp.Length, types[0]));
                        else
                            throw new Exception(string.Format("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup-prefix?", tmp.Length, rawlist.Count, string.Join(", ", types)));
                    }
                }

                //Then we select the filelist we should work with,
                // and create the filelist table to fit
                IEnumerable<IParsedVolume> filelists =
                    from n in remotefiles
                    where n.FileType == RemoteVolumeType.Files
                    orderby n.Time descending
                    select n;

                if (filelistfilter != null)
                    filelists = filelistfilter(filelists).Select(x => x.Value).ToArray();

                foreach(var fl in remotefiles)
                    volumeIds[fl.File.Name] = restoredb.RegisterRemoteVolume(fl.File.Name, fl.FileType, RemoteVolumeState.Uploaded);

                //Record all blocksets and files needed
                using(var tr = restoredb.BeginTransaction())
                {
                    var filelistWork = (from n in filelists orderby n.Time select new RemoteVolume(n.File) as IRemoteVolume).ToList();
                    foreach(var entry in new AsyncDownloader(filelistWork, backend))
                        try
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(restoredb, null);
                                return;
                            }

                            using(var tmpfile = entry.TempFile)
                            {
                                if (entry.Hash != null && entry.Size > 0)
                                    restoredb.UpdateRemoteVolume(entry.Name, RemoteVolumeState.Verified, entry.Size, entry.Hash, tr);

                                var parsed = VolumeBase.ParseFilename(entry.Name);
                                // Create timestamped operations based on the file timestamp
                                var filesetid = restoredb.CreateFileset(volumeIds[entry.Name], parsed.Time, tr);
                                using(var filelistreader = new FilesetVolumeReader(parsed.CompressionModule, tmpfile, m_options))
                                    foreach(var fe in filelistreader.Files.Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path)))
                                    {
                                        try
                                        {
                                            if (fe.Type == FilelistEntryType.Folder)
                                            {
                                                restoredb.AddDirectoryEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else if (fe.Type == FilelistEntryType.File)
                                            {
                                                var blocksetid = restoredb.AddBlockset(fe.Hash, fe.Size, fe.BlocklistHashes, tr);
                                                restoredb.AddFileEntry(filesetid, fe.Path, fe.Time, blocksetid, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else if (fe.Type == FilelistEntryType.Symlink)
                                            {
                                                restoredb.AddSymlinkEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else
                                            {
                                                m_result.AddWarning(string.Format("Skipping file-entry with unknown type {0}: {1} ", fe.Type, fe.Path), null);
                                            }
                                        }
                                        catch (Exception ex)
                                        {
                                            m_result.AddWarning(string.Format("Failed to process file-entry: {0}", fe.Path), ex);
                                        }
                                    }
                            }
                        }
                        catch (Exception ex)
                        {
                            m_result.AddWarning(string.Format("Failed to process file: {0}", entry.Name), ex);
                            if (ex is System.Threading.ThreadAbortException)
                                throw;
                        }

                    using(new Logging.Timer("CommitUpdateFilesetFromRemote"))
                        tr.Commit();
                }

                //Grab all index files, and update the block table
                using(var tr = restoredb.BeginTransaction())
                {
                    var indexfiles =
                        from n in remotefiles
                        where n.FileType == RemoteVolumeType.Index
                        select new RemoteVolume(n.File) as IRemoteVolume;

                    foreach(var sf in new AsyncDownloader(indexfiles.ToList(), backend))
                        try
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(restoredb, null);
                                return;
                            }

                            using(var tmpfile = sf.TempFile)
                            {
                                if (sf.Hash != null && sf.Size > 0)
                                    restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Verified, sf.Size, sf.Hash, tr);

                                using(var svr = new IndexVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options, hashsize))
                                {
                                    Utility.VerifyParameters(restoredb, m_options);

                                    foreach(var a in svr.Volumes)
                                    {
                                        var volumeID = restoredb.GetRemoteVolumeID(a.Filename);
                                        //Add all block/volume mappings
                                        foreach(var b in a.Blocks)
                                            restoredb.UpdateBlock(b.Key, b.Value, volumeID, tr);

                                        restoredb.UpdateRemoteVolume(a.Filename, RemoteVolumeState.Verified, a.Length, a.Hash, tr);
                                        restoredb.AddIndexBlockLink(restoredb.GetRemoteVolumeID(sf.Name), volumeID, tr);
                                    }

                                    //If there are blocklists in the index file, update the blocklists
                                    foreach(var b in svr.BlockLists)
                                        restoredb.UpdateBlockset(b.Hash, b.Blocklist, tr);
                                }
                            }
                        }
                        catch (Exception ex)
                        {
                            //Not fatal
                            m_result.AddWarning(string.Format("Failed to process index file: {0}", sf.Name), ex);
                            if (ex is System.Threading.ThreadAbortException)
                                throw;
                        }

                    using(new Logging.Timer("CommitRecreatedDb"))
                        tr.Commit();

                    // TODO: In some cases, we can avoid downloading all index files,
                    // if we are lucky and pick the right ones
                }

                // We have now grabbed as much information as possible,
                // if we are still missing data, we must now fetch block files
                restoredb.FindMissingBlocklistHashes(hashsize, null);

                //We do this in three passes
                for(var i = 0; i < 3; i++)
                {
                    // Grab the list matching the pass type
                    var lst = restoredb.GetMissingBlockListVolumes(i).ToList();
                    foreach (var sf in new AsyncDownloader(lst, backend))
                        using (var tmpfile = sf.TempFile)
                        using (var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options))
                        using (var tr = restoredb.BeginTransaction())
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(restoredb, null);
                                return;
                            }

                            var volumeid = restoredb.GetRemoteVolumeID(sf.Name);

                            // Update the block table so we know about the block/volume map
                            foreach(var h in rd.Blocks)
                                restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr);

                            // Grab all known blocklists from the volume
                            foreach (var blocklisthash in restoredb.GetBlockLists(volumeid))
                                restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr);

                            // Update tables so we know if we are done
                            restoredb.FindMissingBlocklistHashes(hashsize, tr);

                            using(new Logging.Timer("CommitRestoredBlocklist"))
                                tr.Commit();

                            //At this point we can patch files with data from the block volume
                            if (blockprocessor != null)
                                blockprocessor(sf.Name, rd);
                        }
                }

                backend.WaitForComplete(restoredb, null);

                //All done, we must verify that all blocklists are fully intact;
                // if this fails, the db will not be deleted, so it can still be used,
                // except to continue a backup
                restoredb.VerifyConsistency(null);
            }
        }
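
A minimal sketch (not from the source) of a filelist filter that keeps only the newest fileset; the delegate shape is inferred from the call site above, where filelistfilter(filelists).Select(x => x.Value) must yield IParsedVolume entries:

        // Assumed shape: takes the parsed filelists (ordered newest-first above)
        // and returns numbered entries whose .Value DoRun consumes.
        NumberedFilterFilelistDelegate newestOnly = lists =>
            lists.Take(1).Select((vol, idx) => new KeyValuePair<long, IParsedVolume>(idx, vol));

        // Hypothetical invocation; handler construction is outside this excerpt:
        // handler.DoRun(dbparent, filter: null, filelistfilter: newestOnly, blockprocessor: null);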
Example #2
        private void DoRun(LocalDatabase dbparent, Library.Utility.IFilter filter, RestoreResults result)
        {
            //In this case, we check that the remote storage fits with the database.
            //We can then query the database and find the blocks that we need to do the restore
            using(var database = new LocalRestoreDatabase(dbparent, m_options.Blocksize))
            using(var backend = new BackendManager(m_backendurl, m_options, result.BackendWriter, database))
            {
                database.SetResult(m_result);
                Utility.VerifyParameters(database, m_options);
	        	
                var blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
                var filehasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.FileHashAlgorithm);
                if (blockhasher == null)
                    throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.BlockHashAlgorithm));
                if (!blockhasher.CanReuseTransform)
                    throw new Exception(string.Format(Strings.Foresthash.InvalidCryptoSystem, m_options.BlockHashAlgorithm));

                if (filehasher == null)
                    throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.FileHashAlgorithm));
                if (!filehasher.CanReuseTransform)
                    throw new Exception(string.Format(Strings.Foresthash.InvalidCryptoSystem, m_options.FileHashAlgorithm));

                if (!m_options.NoBackendverification)
                {
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PreRestoreVerify);                
                    FilelistProcessor.VerifyRemoteList(backend, m_options, database, result.BackendWriter);
                }

                //Figure out what files are to be patched, and what blocks are needed
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_CreateFileList);
                using(new Logging.Timer("PrepareBlockList"))
                    PrepareBlockAndFileList(database, m_options, filter, result);

                //Make the entire output setup
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_CreateTargetFolders);
                using(new Logging.Timer("CreateDirectory"))
                    CreateDirectoryStructure(database, m_options, result);
                
                //If we are patching an existing target folder, do not touch stuff that is already updated
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_ScanForExistingFiles);
                using(new Logging.Timer("ScanForExistingTargetBlocks"))
                    ScanForExistingTargetBlocks(database, m_blockbuffer, blockhasher, filehasher, m_options, result);

                //Look for existing blocks in the original source files only
                using(new Logging.Timer("ScanForExistingSourceBlocksFast"))
#if DEBUG
                    if (!m_options.NoLocalBlocks && !string.IsNullOrEmpty(m_options.Restorepath))
#else
                    if (!string.IsNullOrEmpty(m_options.Restorepath))
#endif
                    {
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_ScanForLocalBlocks);
                        ScanForExistingSourceBlocksFast(database, m_options, m_blockbuffer, blockhasher, result);
                    }

                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                {
                    backend.WaitForComplete(database, null);
                    return;
                }

                // If other local files already have the blocks we want, we use them instead of downloading
                if (m_options.PatchWithLocalBlocks)
                {
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PatchWithLocalBlocks);
                    using(new Logging.Timer("PatchWithLocalBlocks"))
                        ScanForExistingSourceBlocks(database, m_options, m_blockbuffer, blockhasher, result);
                }

                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                {
                    backend.WaitForComplete(database, null);
                    return;
                }
                
                // Fill BLOCKS with remote sources
                var volumes = database.GetMissingVolumes().ToList();

                if (volumes.Count > 0)
                {
                    m_result.AddMessage(string.Format("{0} remote files are required to restore", volumes.Count));
                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_DownloadingRemoteFiles);
                }

                var brokenFiles = new List<string>();
                foreach(var blockvolume in new AsyncDownloader(volumes, backend))
                    try
                    {
                        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                        {
                            backend.WaitForComplete(database, null);
                            return;
                        }

                        using(var tmpfile = blockvolume.TempFile)
                        using(var blocks = new BlockVolumeReader(GetCompressionModule(blockvolume.Name), tmpfile, m_options))
                            PatchWithBlocklist(database, blocks, m_options, result, m_blockbuffer);
                    }
                    catch (Exception ex)
                    {
                        brokenFiles.Add(blockvolume.Name);
                        result.AddError(string.Format("Failed to patch with remote file: \"{0}\", message: {1}", blockvolume.Name, ex.Message), ex);
                        if (ex is System.Threading.ThreadAbortException)
                            throw;
                    }
                
                // Reset the filehasher if it was used to verify existing files
                filehasher.Initialize();
					
                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    return;
                
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PostRestoreVerify);
                
                var fileErrors = 0L;
                // After all blocks in the files are restored, verify the file hash
                using(new Logging.Timer("RestoreVerification"))
                    foreach (var file in database.GetFilesToRestore())
                    {
                        try
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(database, null);
                                return;
                            }
                            
                            result.AddVerboseMessage("Testing restored file integrity: {0}", file.Path);
                            
                            string key;
                            long size;
                            using (var fs = m_systemIO.FileOpenRead(file.Path))
                            {
                                size = fs.Length;
                                key = Convert.ToBase64String(filehasher.ComputeHash(fs));
                            }
    
                            if (key != file.Hash)
                                throw new Exception(string.Format("Failed to restore file: \"{0}\". File hash is {1}, expected hash is {2}", file.Path, key, file.Hash));
                            result.FilesRestored++;
                            result.SizeOfRestoredFiles += size;
                        } 
                        catch (Exception ex)
                        {
                            fileErrors++;
                            result.AddWarning(ex.Message, ex);
                            if (ex is System.Threading.ThreadAbortException)
                                throw;
                        }
                    }
                    
                if (fileErrors > 0 && brokenFiles.Count > 0)
                    m_result.AddMessage(string.Format("Failed to restore {0} files, additionally the following files failed to download, which may be the cause:{1}{2}", fileErrors, Environment.NewLine, string.Join(Environment.NewLine, brokenFiles)));

                // Drop the temp tables
                database.DropRestoreTable();
                backend.WaitForComplete(database, null);
            }
            
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_Complete);
            result.EndTime = DateTime.UtcNow;
        }
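
A standalone sketch of the post-restore verification loop above: stream the restored file through the configured hasher and compare Base64 digests. The helper name is illustrative and not part of the handler:

        private static bool VerifyRestoredFile(string path, string expectedHash, System.Security.Cryptography.HashAlgorithm filehasher, out long size)
        {
            // The handler above opens files via m_systemIO; plain System.IO keeps
            // this sketch self-contained.
            using (var fs = System.IO.File.OpenRead(path))
            {
                size = fs.Length;
                // ComputeHash re-initializes the algorithm when it finishes, which
                // is why a CanReuseTransform instance can be shared across files.
                return Convert.ToBase64String(filehasher.ComputeHash(fs)) == expectedHash;
            }
        }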
Example #3
        public void RunRepairRemote()
        {
            if (!System.IO.File.Exists(m_options.Dbpath))
                throw new Exception(string.Format("Database file does not exist: {0}", m_options.Dbpath));

            m_result.OperationProgressUpdater.UpdateProgress(0);

            using(var db = new LocalRepairDatabase(m_options.Dbpath))
            using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
            {
                m_result.SetDatabase(db);
                Utility.VerifyParameters(db, m_options);

                var tp = FilelistProcessor.RemoteListAnalysis(backend, m_options, db, m_result.BackendWriter);
                var buffer = new byte[m_options.Blocksize];
                var blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);

                if (blockhasher == null)
                    throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.BlockHashAlgorithm));
                if (!blockhasher.CanReuseTransform)
                    throw new Exception(string.Format(Strings.Foresthash.InvalidCryptoSystem, m_options.BlockHashAlgorithm));

                var hashsize = blockhasher.HashSize / 8;
				
                var progress = 0;
                var targetProgress = tp.ExtraVolumes.Count() + tp.MissingVolumes.Count() + tp.VerificationRequiredVolumes.Count();

                if (tp.ExtraVolumes.Count() > 0 || tp.MissingVolumes.Count() > 0 || tp.VerificationRequiredVolumes.Count() > 0)
                {
                    if (m_options.Dryrun)
                    {
                        if (!tp.BackupPrefixes.Contains(m_options.Prefix) && tp.ParsedVolumes.Count() > 0)
                        {
                            if (tp.BackupPrefixes.Length == 1)
                                throw new Exception(string.Format("Found no backup files with prefix {0}, but files with prefix {1}, did you forget to set the backup-prefix?", m_options.Prefix, tp.BackupPrefixes[0]));
                            else
                                throw new Exception(string.Format("Found no backup files with prefix {0}, but files with prefixes {1}, did you forget to set the backup-prefix?", m_options.Prefix, string.Join(", ", tp.BackupPrefixes)));
                        }
                        else if (tp.ParsedVolumes.Count() == 0 && tp.ExtraVolumes.Count() > 0)
                        {
                            throw new Exception(string.Format("No files were missing, but {0} remote files were found, did you mean to run recreate-database?", tp.ExtraVolumes.Count()));
                        }
                    }

                    if (tp.VerificationRequiredVolumes.Any())
                    {
                        using(var testdb = new LocalTestDatabase(db))
                        {
                            foreach(var n in tp.VerificationRequiredVolumes)
                                try
                                {
                                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                    {
                                        backend.WaitForComplete(db, null);
                                        return;
                                    }

                                    progress++;
                                    m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);

                                    long size;
                                    string hash;
                                    KeyValuePair<string, IEnumerable<KeyValuePair<Duplicati.Library.Interface.TestEntryStatus, string>>> res;
                                   
                                    using (var tf = backend.GetWithInfo(n.Name, out size, out hash))
                                        res = TestHandler.TestVolumeInternals(testdb, n, tf, m_options, m_result, 1);

                                    if (res.Value.Any())
                                        throw new Exception(string.Format("Remote verification failure: {0}", res.Value.First()));

                                    if (!m_options.Dryrun)
                                    {
                                        m_result.AddMessage(string.Format("Successfully captured hash for {0}, updating database", n.Name));
                                        db.UpdateRemoteVolume(n.Name, RemoteVolumeState.Verified, size, hash);
                                    }

                                }
                                catch (Exception ex)
                                {
                                    m_result.AddError(string.Format("Failed to perform verification for file: {0}, please run verify; message: {1}", n.Name, ex.Message), ex);
                                    if (ex is System.Threading.ThreadAbortException)
                                        throw;
                                }
                        }
                    }

                    // TODO: It is actually possible to use the extra files if we parse them
                    foreach(var n in tp.ExtraVolumes)
                        try
                        {
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(db, null);
                                return;
                            }

                            progress++;
                            m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);
                        
                            if (!m_options.Dryrun)
                            {
                                db.RegisterRemoteVolume(n.File.Name, n.FileType, RemoteVolumeState.Deleting);								
                                backend.Delete(n.File.Name, n.File.Size);
                            }
                            else
                                m_result.AddDryrunMessage(string.Format("would delete file {0}", n.File.Name));
                        }
                        catch (Exception ex)
                        {
                            m_result.AddError(string.Format("Failed to perform cleanup for extra file: {0}, message: {1}", n.File.Name, ex.Message), ex);
                            if (ex is System.Threading.ThreadAbortException)
                                throw;
                        }
							
                    foreach(var n in tp.MissingVolumes)
                    {
                        IDisposable newEntry = null;
                        
                        try
                        {  
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(db, null);
                                return;
                            }    

                            progress++;
                            m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgress);

                            if (n.Type == RemoteVolumeType.Files)
                            {
                                var filesetId = db.GetFilesetIdFromRemotename(n.Name);
                                var w = new FilesetVolumeWriter(m_options, DateTime.UtcNow);
                                newEntry = w;
                                w.SetRemoteFilename(n.Name);
								
                                db.WriteFileset(w, null, filesetId);
	
                                w.Close();
                                if (m_options.Dryrun)
                                    m_result.AddDryrunMessage(string.Format("would re-upload fileset {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size)));
                                else
                                {
                                    db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                                    backend.Put(w);
                                }
                            }
                            else if (n.Type == RemoteVolumeType.Index)
                            {
                                var w = new IndexVolumeWriter(m_options);
                                newEntry = w;
                                w.SetRemoteFilename(n.Name);
								
                                foreach(var blockvolume in db.GetBlockVolumesFromIndexName(n.Name))
                                {								
                                    w.StartVolume(blockvolume.Name);
                                    var volumeid = db.GetRemoteVolumeID(blockvolume.Name);
									
                                    foreach(var b in db.GetBlocks(volumeid))
                                        w.AddBlock(b.Hash, b.Size);
										
                                    w.FinishVolume(blockvolume.Hash, blockvolume.Size);
                                    
                                    if (m_options.IndexfilePolicy == Options.IndexFileStrategy.Full)
                                        foreach(var b in db.GetBlocklists(volumeid, m_options.Blocksize, hashsize))
                                            w.WriteBlocklist(b.Item1, b.Item2, 0, b.Item3);
                                }
								
                                w.Close();
								
                                if (m_options.Dryrun)
                                    m_result.AddDryrunMessage(string.Format("would re-upload index file {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size)));
                                else
                                {
                                    db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                                    backend.Put(w);
                                }
                            }
                            else if (n.Type == RemoteVolumeType.Blocks)
                            {
                                var w = new BlockVolumeWriter(m_options);
                                newEntry = w;
                                w.SetRemoteFilename(n.Name);
                                
                                using(var mbl = db.CreateBlockList(n.Name))
                                {
                                    //First we grab all known blocks from local files
                                    foreach(var block in mbl.GetSourceFilesWithBlocks(m_options.Blocksize))
                                    {
                                        var hash = block.Hash;
                                        var size = (int)block.Size;
                                        
                                        foreach(var source in block.Sources)
                                        {
                                            var file = source.File;
                                            var offset = source.Offset;
                                            
                                            try
                                            {
                                                if (System.IO.File.Exists(file))
                                                    using(var f = System.IO.File.OpenRead(file))
                                                    {
                                                        f.Position = offset;
                                                        if (size == Library.Utility.Utility.ForceStreamRead(f, buffer, size))
                                                        {
                                                            var newhash = Convert.ToBase64String(blockhasher.ComputeHash(buffer, 0, size));
                                                            if (newhash == hash)
                                                            {
                                                                if (mbl.SetBlockRestored(hash, size))
                                                                    w.AddBlock(hash, buffer, 0, size, Duplicati.Library.Interface.CompressionHint.Default);
                                                                break;
                                                            }
                                                        }
                                                    }
                                            }
                                            catch (Exception ex)
                                            {
                                                m_result.AddError(string.Format("Failed to access file: {0}", file), ex);
                                            }
                                        }
                                    }
                                    
                                    //Then we grab all remote volumes that have the missing blocks
                                    foreach(var vol in new AsyncDownloader(mbl.GetMissingBlockSources().ToList(), backend))
                                    {
                                        try
                                        {
                                            using(var tmpfile = vol.TempFile)
                                            using(var f = new BlockVolumeReader(RestoreHandler.GetCompressionModule(vol.Name), tmpfile, m_options))
                                                foreach(var b in f.Blocks)
                                                    if (mbl.SetBlockRestored(b.Key, b.Value))
                                                        if (f.ReadBlock(b.Key, buffer) == b.Value)
                                                            w.AddBlock(b.Key, buffer, 0, (int)b.Value, Duplicati.Library.Interface.CompressionHint.Default);
                                        }
                                        catch (Exception ex)
                                        {
                                            m_result.AddError(string.Format("Failed to access remote file: {0}", vol.Name), ex);
                                        }
                                    }
                                    
                                    // If we managed to recover all blocks, NICE!
                                    var missingBlocks = mbl.GetMissingBlocks().Count();
                                    if (missingBlocks > 0)
                                    {                                    
                                        //TODO: How do we handle this situation?
                                        m_result.AddMessage(string.Format("Repair cannot acquire {0} required blocks for volume {1}, which are required by the following filesets: ", missingBlocks, n.Name));
                                        foreach(var f in mbl.GetFilesetsUsingMissingBlocks())
                                            m_result.AddMessage(f.Name);
                                        
                                        if (!m_options.Dryrun)
                                        {
                                            m_result.AddMessage("This may be fixed by deleting the filesets and running repair again");
                                            
                                            throw new Exception(string.Format("Repair not possible, missing {0} blocks!!!", missingBlocks));
                                        }
                                    }
                                    else
                                    {
                                        if (m_options.Dryrun)
                                            m_result.AddDryrunMessage(string.Format("would re-upload block file {0}, with size {1}, previous size {2}", n.Name, Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(w.LocalFilename).Length), Library.Utility.Utility.FormatSizeString(n.Size)));
                                        else
                                        {
                                            db.UpdateRemoteVolume(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null, null);
                                            backend.Put(w);
                                        }
                                    }
                                }
                            }
                        }
                        catch (Exception ex)
                        {
                            if (newEntry != null)
                                try { newEntry.Dispose(); }
                                catch { }
                                finally { newEntry = null; }
                                
                            m_result.AddError(string.Format("Failed to perform cleanup for missing file: {0}, message: {1}", n.Name, ex.Message), ex);
                            
                            if (ex is System.Threading.ThreadAbortException)
                                throw;
                        }
                    }
                }
                else
                {
                    m_result.AddMessage("Destination and database are synchronized, not making any changes");
                }

                m_result.OperationProgressUpdater.UpdateProgress(1);
                backend.WaitForComplete(db, null);
                db.WriteResults();
            }
        }
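
A hypothetical helper distilled from the missing-block recovery loop above: read a block from a candidate local file and accept it only if it still hashes to the expected value. Only the pattern comes from the example; the name and signature are illustrative:

        private static bool TryReadVerifiedBlock(string path, long offset, int size, string expectedHash, byte[] buffer, System.Security.Cryptography.HashAlgorithm blockhasher)
        {
            if (!System.IO.File.Exists(path))
                return false;

            using (var f = System.IO.File.OpenRead(path))
            {
                f.Position = offset;

                // The source file may have shrunk since the backup was made
                if (Library.Utility.Utility.ForceStreamRead(f, buffer, size) != size)
                    return false;

                // The file may also have changed in place; only a matching hash
                // proves this is still the block the backup recorded.
                return Convert.ToBase64String(blockhasher.ComputeHash(buffer, 0, size)) == expectedHash;
            }
        }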
Example #4

        public IFilesAndMetadata GetMissingBlockData(BlockVolumeReader curvolume, long blocksize)
        {
            return new FilesAndMetadata(m_connection, m_tempfiletable, m_tempblocktable, blocksize, curvolume);
        }
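
A hypothetical call site for the factory above, assuming the returned IFilesAndMetadata is disposable like the FilesAndMetadata implementation shown in Example #6:

        // using (var reader = new BlockVolumeReader(compressionModule, tmpfile, options))
        // using (var missing = db.GetMissingBlockData(reader, options.Blocksize))
        //     { /* enumerate the files and metadata this volume can complete */ }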
Example #5
        private static void PatchWithBlocklist(LocalRestoreDatabase database, BlockVolumeReader blocks, Options options, RestoreResults result, byte[] blockbuffer)
        {
            var blocksize = options.Blocksize;
            var updateCounter = 0L;
            using(var blockmarker = database.CreateBlockMarker())
            {
                foreach(var restorelist in database.GetFilesWithMissingBlocks(blocks))
                {
                    var targetpath = restorelist.Path;
                    result.AddVerboseMessage("Patching file with remote data: {0}", targetpath);

                    if (options.Dryrun)
                    {
                        result.AddDryrunMessage(string.Format("Would patch file with remote data: {0}", targetpath));
                    }
                    else
                    {
                        try
                        {
                            var folderpath = m_systemIO.PathGetDirectoryName(targetpath);
                            if (!options.Dryrun && !m_systemIO.DirectoryExists(folderpath))
                            {
                                result.AddWarning(string.Format("Creating missing folder {0} for file {1}", folderpath, targetpath), null);
                                m_systemIO.DirectoryCreate(folderpath);
                            }

                            // TODO: Much faster if we iterate the volume and check which blocks are used,
                            // because the compressors usually like sequential reading
                            using(var file = m_systemIO.FileOpenReadWrite(targetpath))
                                foreach(var targetblock in restorelist.Blocks)
                                {
                                    file.Position = targetblock.Offset;
                                    var size = blocks.ReadBlock(targetblock.Key, blockbuffer);
                                    if (targetblock.Size == size)
                                    {
                                        file.Write(blockbuffer, 0, size);
                                        blockmarker.SetBlockRestored(restorelist.FileID, targetblock.Offset / blocksize, targetblock.Key, size);
                                    }
                                }

                            if (updateCounter++ % 20 == 0)
                                blockmarker.UpdateProcessed(result.OperationProgressUpdater);
                        }
                        catch (Exception ex)
                        {
                            result.AddWarning(string.Format("Failed to patch file: \"{0}\", message: {1}", targetpath, ex.Message), ex);
                        }

                        try
                        {
                            ApplyMetadata(targetpath, database);
                        }
                        catch (Exception ex)
                        {
                            result.AddWarning(string.Format("Failed to apply metadata to file: \"{0}\", message: {1}", targetpath, ex.Message), ex);
                        }
                    }
                }

                blockmarker.UpdateProcessed(result.OperationProgressUpdater);
                blockmarker.Commit(result);
            }
        }
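
A worked example of the block-index arithmetic used by SetBlockRestored above, assuming block-aligned offsets (which GetFilesWithMissingBlocks provides) and an illustrative 100 KiB blocksize:

        long blocksize = 102400;              // illustrative; the real value comes from options.Blocksize
        long offset = 204800;                 // targetblock.Offset of a block two blocks in
        long blockindex = offset / blocksize; // == 2, the zero-based index recorded per file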
Example #6

            public FilesAndMetadata(System.Data.IDbConnection connection, string filetablename, string blocktablename, long blocksize, BlockVolumeReader curvolume)
            {
                m_filetablename = filetablename;
                m_blocktablename = blocktablename;
                m_blocksize = blocksize;
                m_connection = connection;

                using (var c = m_connection.CreateCommand())
                {
                    m_tmptable = "VolumeFiles-" + Library.Utility.Utility.ByteArrayAsHexString(Guid.NewGuid().ToByteArray());
                    c.CommandText = string.Format(@"CREATE TEMPORARY TABLE ""{0}"" ( ""Hash"" TEXT NOT NULL, ""Size"" INTEGER NOT NULL )", m_tmptable);
                    c.ExecuteNonQuery();


                    c.CommandText = string.Format(@"INSERT INTO ""{0}"" (""Hash"", ""Size"") VALUES (?,?)", m_tmptable);
                    c.AddParameters(2);
                    foreach (var s in curvolume.Blocks)
                    {
                        c.SetParameterValue(0, s.Key);
                        c.SetParameterValue(1, s.Value);
                        c.ExecuteNonQuery();
                    }

                    // The index _HashSizeIndex is not needed anymore. Index on "Blocks-..." is used on Join in GetMissingBlocks

                }
            }
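
The AddParameters/SetParameterValue calls above are convenience extensions from the Duplicati codebase; with plain ADO.NET the same parameterized insert loop looks roughly like this sketch:

            using (var cmd = m_connection.CreateCommand())
            {
                cmd.CommandText = string.Format(@"INSERT INTO ""{0}"" (""Hash"", ""Size"") VALUES (?,?)", m_tmptable);
                var pHash = cmd.CreateParameter();
                var pSize = cmd.CreateParameter();
                cmd.Parameters.Add(pHash);
                cmd.Parameters.Add(pSize);

                foreach (var s in curvolume.Blocks)
                {
                    pHash.Value = s.Key;   // block hash
                    pSize.Value = s.Value; // block size
                    cmd.ExecuteNonQuery(); // one row per block in the downloaded volume
                }
            }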
Example #7
 public IEnumerable<IVolumePatch> GetFilesWithMissingBlocks(BlockVolumeReader curvolume)
 {
     return new VolumePatchEnumerable(m_connection, m_tempfiletable, m_tempblocktable, m_blocksize, curvolume);
 }
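
This enumerable is consumed by PatchWithBlocklist in Example #5 above; each IVolumePatch exposes the target Path, the FileID, and the Blocks the current volume can supply:

        // foreach (var restorelist in database.GetFilesWithMissingBlocks(blocks))
        //     foreach (var targetblock in restorelist.Blocks)
        //         { /* seek to targetblock.Offset and write the block's bytes */ }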
Example #8
        internal bool DoCompact(LocalDeleteDatabase db, bool hasVerifiedBackend, ref System.Data.IDbTransaction transaction)
        {
            var report = db.GetCompactReport(m_options.VolumeSize, m_options.Threshold, m_options.SmallFileSize, m_options.SmallFileMaxCount, transaction);
            report.ReportCompactData(m_result);
			
            if (report.ShouldReclaim || report.ShouldCompact)
            {
                using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, db))
                {
                    if (!hasVerifiedBackend && !m_options.NoBackendverification)
                        FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter);
		
                    BlockVolumeWriter newvol = new BlockVolumeWriter(m_options);
                    newvol.VolumeID = db.RegisterRemoteVolume(newvol.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, transaction);
	
                    IndexVolumeWriter newvolindex = null;
                    if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
                    {
                        newvolindex = new IndexVolumeWriter(m_options);
                        newvolindex.VolumeID = db.RegisterRemoteVolume(newvolindex.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, transaction);
                        db.AddIndexBlockLink(newvolindex.VolumeID, newvol.VolumeID, transaction);
                        newvolindex.StartVolume(newvol.RemoteFilename);
                    }
					
                    long blocksInVolume = 0;
                    long discardedBlocks = 0;
                    long discardedSize = 0;
                    byte[] buffer = new byte[m_options.Blocksize];
                    var remoteList = db.GetRemoteVolumes().Where(n => n.State == RemoteVolumeState.Uploaded || n.State == RemoteVolumeState.Verified).ToArray();
					
                    //These are for bookkeeping
                    var uploadedVolumes = new List<KeyValuePair<string, long>>();
                    var deletedVolumes = new List<KeyValuePair<string, long>>();
                    var downloadedVolumes = new List<KeyValuePair<string, long>>();
					
                    //We start by deleting unused volumes to save space before uploading new stuff
                    var fullyDeleteable = (from v in remoteList
                                           where report.DeleteableVolumes.Contains(v.Name)
                                           select (IRemoteVolume)v).ToList();                    
                    deletedVolumes.AddRange(DoDelete(db, backend, fullyDeleteable, ref transaction));

                    // This list is used to pick up unused volumes,
                    // so they can be deleted once the upload of the
                    // required fragments is complete
                    var deleteableVolumes = new List<IRemoteVolume>();

                    if (report.ShouldCompact)
                    {
                        var volumesToDownload = (from v in remoteList
                                                 where report.CompactableVolumes.Contains(v.Name)
                                                 select (IRemoteVolume)v).ToList();
						
                        using(var q = db.CreateBlockQueryHelper(m_options, transaction))
                        {
                            foreach(var entry in new AsyncDownloader(volumesToDownload, backend))
                            using(var tmpfile = entry.TempFile)
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(db, transaction);
                                    return false;
                                }
                                    
                                downloadedVolumes.Add(new KeyValuePair<string, long>(entry.Name, entry.Size));
                                var inst = VolumeBase.ParseFilename(entry.Name);
                                using(var f = new BlockVolumeReader(inst.CompressionModule, tmpfile, m_options))
                                {
                                    foreach(var e in f.Blocks)
                                    {
                                        if (q.UseBlock(e.Key, e.Value, transaction))
                                        {
                                            //TODO: How do we get the compression hint? Reverse query for filename in db?
                                            var s = f.ReadBlock(e.Key, buffer);
                                            if (s != e.Value)
                                                throw new Exception(string.Format("Size mismatch problem, {0} vs {1}", s, e.Value));

                                            newvol.AddBlock(e.Key, buffer, 0, s, Duplicati.Library.Interface.CompressionHint.Compressible);
                                            if (newvolindex != null)
                                                newvolindex.AddBlock(e.Key, e.Value);

                                            db.MoveBlockToNewVolume(e.Key, e.Value, newvol.VolumeID, transaction);
                                            blocksInVolume++;

                                            if (newvol.Filesize > m_options.VolumeSize)
                                            {
                                                uploadedVolumes.Add(new KeyValuePair<string, long>(newvol.RemoteFilename, new System.IO.FileInfo(newvol.LocalFilename).Length));
                                                if (newvolindex != null)
                                                    uploadedVolumes.Add(new KeyValuePair<string, long>(newvolindex.RemoteFilename, new System.IO.FileInfo(newvolindex.LocalFilename).Length));

                                                if (!m_options.Dryrun)
                                                    backend.Put(newvol, newvolindex);
                                                else
                                                    m_result.AddDryrunMessage(string.Format("Would upload generated blockset of size {0}", Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(newvol.LocalFilename).Length)));

                                                newvol = new BlockVolumeWriter(m_options);
                                                newvol.VolumeID = db.RegisterRemoteVolume(newvol.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, transaction);

                                                if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
                                                {
                                                    newvolindex = new IndexVolumeWriter(m_options);
                                                    newvolindex.VolumeID = db.RegisterRemoteVolume(newvolindex.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, transaction);
                                                    db.AddIndexBlockLink(newvolindex.VolumeID, newvol.VolumeID, transaction);
                                                    newvolindex.StartVolume(newvol.RemoteFilename);
                                                }

                                                blocksInVolume = 0;

                                                //After we upload this volume, we can delete all previously encountered volumes
                                                deletedVolumes.AddRange(DoDelete(db, backend, deleteableVolumes, ref transaction));
                                                deleteableVolumes = new List<IRemoteVolume>();
                                            }
                                        }
                                        else
                                        {
                                            discardedBlocks++;
                                            discardedSize += e.Value;
                                        }
                                    }
                                }

                                deleteableVolumes.Add(entry);
                            }

                            if (blocksInVolume > 0)
                            {
                                uploadedVolumes.Add(new KeyValuePair<string, long>(newvol.RemoteFilename, new System.IO.FileInfo(newvol.LocalFilename).Length));
                                if (newvolindex != null)
                                    uploadedVolumes.Add(new KeyValuePair<string, long>(newvolindex.RemoteFilename, new System.IO.FileInfo(newvolindex.LocalFilename).Length));
                                if (!m_options.Dryrun)
                                    backend.Put(newvol, newvolindex);
                                else
                                    m_result.AddDryrunMessage(string.Format("Would upload generated blockset of size {0}", Library.Utility.Utility.FormatSizeString(new System.IO.FileInfo(newvol.LocalFilename).Length)));
                            }
                            else
                            {
                                db.RemoveRemoteVolume(newvol.RemoteFilename, transaction);
                                if (newvolindex != null)
                                {
                                    db.RemoveRemoteVolume(newvolindex.RemoteFilename, transaction);
                                    newvolindex.FinishVolume(null, 0);
                                }
                            }
                        }
                    }

                    deletedVolumes.AddRange(DoDelete(db, backend, deleteableVolumes, ref transaction));
										
                    var downloadSize = downloadedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a,x) => a + x.Value);
                    var deletedSize = deletedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a,x) => a + x.Value);
                    var uploadSize = uploadedVolumes.Where(x => x.Value >= 0).Aggregate(0L, (a,x) => a + x.Value);
					
                    m_result.DeletedFileCount = deletedVolumes.Count;
                    m_result.DownloadedFileCount = downloadedVolumes.Count;
                    m_result.UploadedFileCount = uploadedVolumes.Count;
                    m_result.DeletedFileSize = deletedSize;
                    m_result.DownloadedFileSize = downloadSize;
                    m_result.UploadedFileSize = uploadSize;
                    m_result.Dryrun = m_options.Dryrun;
                    
                    if (m_result.Dryrun)
                    {
                        if (downloadedVolumes.Count == 0)
                            m_result.AddDryrunMessage(string.Format("Would delete {0} files, which would reduce storage by {1}", m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize)));
                        else
                            m_result.AddDryrunMessage(string.Format("Would download {0} file(s) with a total size of {1}, delete {2} file(s) with a total size of {3}, and compact to {4} file(s) with a size of {5}, which would reduce storage by {6} file(s) and {7}", m_result.DownloadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DownloadedFileSize), m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize), m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.UploadedFileSize), m_result.DeletedFileCount - m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize - m_result.UploadedFileSize)));
                    }
                    else
                    {
                        if (m_result.DownloadedFileCount == 0)
                            m_result.AddMessage(string.Format("Deleted {0} files, which reduced storage by {1}", m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize)));
                        else
                            m_result.AddMessage(string.Format("Downloaded {0} file(s) with a total size of {1}, deleted {2} file(s) with a total size of {3}, and compacted to {4} file(s) with a size of {5}, which reduced storage by {6} file(s) and {7}", m_result.DownloadedFileCount, Library.Utility.Utility.FormatSizeString(downloadSize), m_result.DeletedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize), m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.UploadedFileSize), m_result.DeletedFileCount - m_result.UploadedFileCount, Library.Utility.Utility.FormatSizeString(m_result.DeletedFileSize - m_result.UploadedFileSize)));
                    }
							
                    backend.WaitForComplete(db, transaction);
                }

                return (m_result.DeletedFileCount + m_result.UploadedFileCount) > 0;
            }
            else
            {
                return false;
            }
        }
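
An illustrative sketch (not the library's GetCompactReport implementation) of the kind of threshold test a compact report encodes: a volume becomes a compaction candidate once the share of wasted bytes crosses the configured percentage:

        static bool ShouldCompactVolume(long wastedBytes, long volumeSize, long thresholdPercent)
        {
            if (volumeSize <= 0)
                return false;
            // e.g. 30 MiB wasted in a 50 MiB volume is 60%, above a 25% threshold
            return wastedBytes * 100 >= thresholdPercent * volumeSize;
        }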
Example #9

        /// <summary>
        /// Run the recreate procedure
        /// </summary>
        /// <param name="dbparent">The database to restore into</param>
        /// <param name="updating">True if this is an update call, false otherwise</param>
        /// <param name="filter">Filters the files in a filelist to prevent downloading unwanted data</param>
        /// <param name="filelistfilter">A filter that can be used to disregard certain remote files, intended to be used to select a certain filelist</param>
        /// <param name="blockprocessor">A callback hook that can be used to work with downloaded block volumes, intended to be used to recover data blocks while processing blocklists</param>
        internal void DoRun(LocalDatabase dbparent, bool updating, Library.Utility.IFilter filter = null, NumberedFilterFilelistDelegate filelistfilter = null, BlockVolumePostProcessor blockprocessor = null)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Recreate_Running);

            //We build a local database in steps.
            using(var restoredb = new LocalRecreateDatabase(dbparent, m_options))
            using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, restoredb))
            {
                restoredb.RepairInProgress = true;

                var volumeIds = new Dictionary<string, long>();

                var rawlist = backend.List();
		
                //First step is to examine the remote storage to see what
                // kind of data we can find
                var remotefiles =
                    (from x in rawlist
                     let n = VolumeBase.ParseFilename(x)
                     where n != null && n.Prefix == m_options.Prefix
                     select n).ToArray(); //ToArray() ensures that we do not remote-request it multiple times

                if (remotefiles.Length == 0)
                {
                    if (rawlist.Count == 0)
                        throw new Exception("No files were found at the remote location, perhaps the target url is incorrect?");
                    else
                    {
                        var tmp =
                            (from x in rawlist
                             let n = VolumeBase.ParseFilename(x)
                             where n != null
                             select n.Prefix).ToArray();
                
                        var types = tmp.Distinct().ToArray();
                        if (tmp.Length == 0)
                            throw new Exception(string.Format("Found {0} files at the remote storage, but none that could be parsed", rawlist.Count));
                        else if (types.Length == 1)
                            throw new Exception(string.Format("Found {0} parse-able files with the prefix {1}, did you forget to set the backup-prefix?", tmp.Length, types[0]));
                        else
                            throw new Exception(string.Format("Found {0} parse-able files (of {1} files) with different prefixes: {2}, did you forget to set the backup-prefix?", tmp.Length, rawlist.Count, string.Join(", ", types)));
                    }
                }

                //Then we select the filelist we should work with,
                // and create the filelist table to fit
                IEnumerable<IParsedVolume> filelists =
                    from n in remotefiles
                    where n.FileType == RemoteVolumeType.Files
                    orderby n.Time descending
                    select n;

                if (!filelists.Any())
                    throw new Exception("No filelists found on the remote destination");

                if (filelistfilter != null)
                    filelists = filelistfilter(filelists).Select(x => x.Value).ToArray();

                if (!filelists.Any())
                    throw new Exception("No filelists were accepted by the filelist filter");

                // If we are updating, all files should be accounted for
                foreach(var fl in remotefiles)
                    volumeIds[fl.File.Name] = updating ? restoredb.GetRemoteVolumeID(fl.File.Name) : restoredb.RegisterRemoteVolume(fl.File.Name, fl.FileType, fl.File.Size, RemoteVolumeState.Uploaded);

                var hasUpdatedOptions = false;

                if (updating)
                {
                    Utility.UpdateOptionsFromDb(restoredb, m_options);
                    Utility.VerifyParameters(restoredb, m_options);
                }

                //Record all blocksets and files needed
                using(var tr = restoredb.BeginTransaction())
                {
                    var filelistWork = (from n in filelists orderby n.Time select new RemoteVolume(n.File) as IRemoteVolume).ToList();
                    m_result.AddMessage(string.Format("Rebuild database started, downloading {0} filelists", filelistWork.Count));

                    var progress = 0;

                    // Register the files we are working with, if not already updated
                    if (updating)
                    {
                        foreach(var n in filelists)
                            if (volumeIds[n.File.Name] == -1)
                                volumeIds[n.File.Name] = restoredb.RegisterRemoteVolume(n.File.Name, n.FileType, RemoteVolumeState.Uploaded, n.File.Size, new TimeSpan(0), tr);
                    }
                                
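                    // The first filelist also serves as a passphrase check: a decryption
                    // failure (CryptographicException) on it is fatal, see the catch below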
                    var isFirstFilelist = true;
                    var blocksize = m_options.Blocksize;
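                    // Number of block hashes that fit in a single block, used to compute
                    // how many blocklist hashes a file of a given size should have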
                    var hashes_pr_block = blocksize / m_options.BlockhashSize;

                    foreach(var entry in new AsyncDownloader(filelistWork, backend))
                        try
                        {
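                            // Honor a pending stop request before processing the next volume,
                            // flushing any outstanding backend work first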
                            if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                            {
                                backend.WaitForComplete(restoredb, null);
                                return;
                            }    
                        
                            progress++;
                            if (filelistWork.Count == 1 && m_options.RepairOnlyPaths)
                                m_result.OperationProgressUpdater.UpdateProgress(0.5f);
                            else
                                m_result.OperationProgressUpdater.UpdateProgress(((float)progress / filelistWork.Count()) * (m_options.RepairOnlyPaths ? 1f : 0.2f));

                            using(var tmpfile = entry.TempFile)
                            {
                                isFirstFilelist = false;

                                if (entry.Hash != null && entry.Size > 0)
                                    restoredb.UpdateRemoteVolume(entry.Name, RemoteVolumeState.Verified, entry.Size, entry.Hash, tr);

                                var parsed = VolumeBase.ParseFilename(entry.Name);

                                if (!hasUpdatedOptions && !updating) 
                                {
                                    VolumeReaderBase.UpdateOptionsFromManifest(parsed.CompressionModule, tmpfile, m_options);
                                    hasUpdatedOptions = true;
                                    // Recompute the cached sizes
                                    blocksize = m_options.Blocksize;
                                    hashes_pr_block = blocksize / m_options.BlockhashSize;
                                }


                                // Create timestamped operations based on the file timestamp
                                var filesetid = restoredb.CreateFileset(volumeIds[entry.Name], parsed.Time, tr);
                                using(var filelistreader = new FilesetVolumeReader(parsed.CompressionModule, tmpfile, m_options))
                                    foreach(var fe in filelistreader.Files.Where(x => Library.Utility.FilterExpression.Matches(filter, x.Path)))
                                    {
                                        try
                                        {
                                            if (fe.Type == FilelistEntryType.Folder)
                                            {
                                                restoredb.AddDirectoryEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else if (fe.Type == FilelistEntryType.File)
                                            {
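                                                // Ceiling divisions: the number of blocks covering the file,
                                                // and the number of blocklist hashes needed to index those blocks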
                                                var expectedblocks = (fe.Size + blocksize - 1)  / blocksize;
                                                var expectedblocklisthashes = (expectedblocks + hashes_pr_block - 1) / hashes_pr_block;
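                                                // Single-block files store the block hash directly and need no blocklist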
                                                if (expectedblocks <= 1) expectedblocklisthashes = 0;

                                                var blocksetid = restoredb.AddBlockset(fe.Hash, fe.Size, fe.BlocklistHashes, expectedblocklisthashes, tr);
                                                restoredb.AddFileEntry(filesetid, fe.Path, fe.Time, blocksetid, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else if (fe.Type == FilelistEntryType.Symlink)
                                            {
                                                restoredb.AddSymlinkEntry(filesetid, fe.Path, fe.Time, fe.Metahash, fe.Metahash == null ? -1 : fe.Metasize, tr);
                                            }
                                            else
                                            {
                                                m_result.AddWarning(string.Format("Skipping file-entry with unknown type {0}: {1}", fe.Type, fe.Path), null);
                                            }
                                        }
                                        catch (Exception ex)
                                        {
                                            m_result.AddWarning(string.Format("Failed to process file-entry: {0}", fe.Path), ex);
                                        }
                                    }
                            }
                        }
                        catch (Exception ex)
                        {
                            m_result.AddWarning(string.Format("Failed to process file: {0}", entry.Name), ex);
                            if (ex is System.Threading.ThreadAbortException)
                                throw;

                            if (isFirstFilelist && ex is System.Security.Cryptography.CryptographicException)
                                throw;
                        }

                    //Make sure we write the config
                    if (!updating)
                        Utility.VerifyParameters(restoredb, m_options, tr);

                    using(new Logging.Timer("CommitUpdateFilesetFromRemote"))
                        tr.Commit();
                }
            
                if (!m_options.RepairOnlyPaths)
                {
                    var hashalg = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
                    if (hashalg == null)
                        throw new Exception(Strings.Common.InvalidHashAlgorithm(m_options.BlockHashAlgorithm));
                    var hashsize = hashalg.HashSize / 8;

                    //Grab all index files, and update the block table
                    using(var tr = restoredb.BeginTransaction())
                    {
                        var indexfiles =
                            (from n in remotefiles
                             where n.FileType == RemoteVolumeType.Index
                             select new RemoteVolume(n.File) as IRemoteVolume).ToList();

                        m_result.AddMessage(string.Format("Filelists restored, downloading {0} index files", indexfiles.Count));

                        var progress = 0;
                                    
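                        // Index files account for the 0.2 - 0.7 range of the progress bar;
                        // filelists used 0 - 0.2, and the block-volume passes below use 0.7 - 1.0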
                        foreach(var sf in new AsyncDownloader(indexfiles, backend))
                            try
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(restoredb, null);
                                    return;
                                }

                                progress++;
                                m_result.OperationProgressUpdater.UpdateProgress((((float)progress / indexfiles.Count) * 0.5f) + 0.2f);

                                using(var tmpfile = sf.TempFile)
                                {
                                    if (sf.Hash != null && sf.Size > 0)
                                        restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Verified, sf.Size, sf.Hash, tr);
                
                                    using(var svr = new IndexVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options, hashsize))
                                    {
                                        foreach(var a in svr.Volumes)
                                        {
                                            var filename = a.Filename;
                                            var volumeID = restoredb.GetRemoteVolumeID(filename);

                                            // No such file
                                            if (volumeID < 0)
                                                volumeID = ProbeForMatchingFilename(ref filename, restoredb);

                                            // Still broken, register a missing item
                                            if (volumeID < 0)
                                            {
                                                var p = VolumeBase.ParseFilename(filename);
                                                if (p == null)
                                                    throw new Exception(string.Format("Unable to parse filename: {0}", filename));
                                                m_result.AddError(string.Format("Remote file referenced as {0}, but not found in list, registering a missing remote file", filename), null);
                                                volumeID = restoredb.RegisterRemoteVolume(filename, p.FileType, RemoteVolumeState.Verified, tr);
                                            }
                                            
                                            //Add all block/volume mappings
                                            foreach(var b in a.Blocks)
                                                restoredb.UpdateBlock(b.Key, b.Value, volumeID, tr);

                                            restoredb.UpdateRemoteVolume(filename, RemoteVolumeState.Verified, a.Length, a.Hash, tr);
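                                            // Link the index volume to the block volume it describes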
                                            restoredb.AddIndexBlockLink(restoredb.GetRemoteVolumeID(sf.Name), volumeID, tr);
                                        }
                                
                                        //If there are blocklists in the index file, update the blocklists
                                        foreach(var b in svr.BlockLists)
                                            restoredb.UpdateBlockset(b.Hash, b.Blocklist, tr);
                                    }
                                }
                            }
                            catch (Exception ex)
                            {
                                //Not fatal
                                m_result.AddWarning(string.Format("Failed to process index file: {0}", sf.Name), ex);
                                if (ex is System.Threading.ThreadAbortException)
                                    throw;
                            }

                        using(new Logging.Timer("CommitRecreatedDb"))
                            tr.Commit();
                    
                        // TODO: In some cases, we can avoid downloading all index files, 
                        // if we are lucky and pick the right ones
                    }

                    // We have now grabbed as much information as possible,
                    // if we are still missing data, we must now fetch block files
                    restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, null);
                
                    //We do this in three passes: first the volumes known to contain
                    // required blocklists, then likely candidates, and finally all
                    // remaining volumes
                    for(var i = 0; i < 3; i++)
                    {
                        // Grab the list matching the pass type
                        var lst = restoredb.GetMissingBlockListVolumes(i, m_options.Blocksize, hashsize).ToList();
                        if (lst.Count > 0)
                        {
                            switch (i)
                            {
                                case 0:
                                    if (m_options.Verbose)
                                        m_result.AddVerboseMessage("Processing required {0} blocklist volumes: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                    else
                                        m_result.AddMessage(string.Format("Processing required {0} blocklist volumes", lst.Count));
                                    break;
                                case 1:
                                    if (m_options.Verbose)
                                        m_result.AddVerboseMessage("Probing {0} candidate blocklist volumes: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                    else
                                        m_result.AddMessage(string.Format("Probing {0} candidate blocklist volumes", lst.Count));
                                    break;
                                default:
                                    if (m_options.Verbose)
                                        m_result.AddVerboseMessage("Processing all of the {0} volumes for blocklists: {1}", lst.Count, string.Join(", ", lst.Select(x => x.Name)));
                                    else
                                        m_result.AddMessage(string.Format("Processing all of the {0} volumes for blocklists", lst.Count));
                                    break;
                            }
                        }

                        var progress = 0;
                        foreach(var sf in new AsyncDownloader(lst, backend))
                            using(var tmpfile = sf.TempFile)
                            using(var rd = new BlockVolumeReader(RestoreHandler.GetCompressionModule(sf.Name), tmpfile, m_options))
                            using(var tr = restoredb.BeginTransaction())
                            {
                                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                {
                                    backend.WaitForComplete(restoredb, null);
                                    return;
                                }    
                            
                                progress++;
                                m_result.OperationProgressUpdater.UpdateProgress((((float)progress / lst.Count) * 0.1f) + 0.7f + (i * 0.1f));

                                var volumeid = restoredb.GetRemoteVolumeID(sf.Name);

                                restoredb.UpdateRemoteVolume(sf.Name, RemoteVolumeState.Uploaded, sf.Size, sf.Hash, tr);
                            
                                // Update the block table so we know about the block/volume map
                                foreach(var h in rd.Blocks)
                                    restoredb.UpdateBlock(h.Key, h.Value, volumeid, tr);
                            
                                // Grab all known blocklists from the volume
                                foreach(var blocklisthash in restoredb.GetBlockLists(volumeid))
                                    restoredb.UpdateBlockset(blocklisthash, rd.ReadBlocklist(blocklisthash, hashsize), tr);
    
                                // Update tables so we know if we are done
                                restoredb.FindMissingBlocklistHashes(hashsize, m_options.Blocksize, tr);
                        
                                using(new Logging.Timer("CommitRestoredBlocklist"))
                                    tr.Commit();
    
                                //At this point we can patch files with data from the block volume
                                if (blockprocessor != null)
                                    blockprocessor(sf.Name, rd);
                            }
                    }
                }
                
                backend.WaitForComplete(restoredb, null);

                m_result.AddMessage("Recreate completed, verifying the database consistency");

                //All done, we must now verify that all blocklists are fully intact;
                // if this fails, the db will not be deleted, so it can still be used,
                // except to continue a backup
                restoredb.VerifyConsistency(null, m_options.Blocksize, m_options.BlockhashSize);

                m_result.AddMessage("Recreate completed, and consistency checks completed, marking database as complete");

                restoredb.RepairInProgress = false;
            }
        }
        private static void PatchWithBlocklist(LocalRestoreDatabase database, BlockVolumeReader blocks, Options options, RestoreResults result, byte[] blockbuffer, RestoreHandlerMetadataStorage metadatastorage)
        {
            var blocksize = options.Blocksize;
            var updateCounter = 0L;
            var fullblockverification = options.FullBlockVerification;
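            // The hasher is only needed when full block verification is enabled; each
            // restored block is then re-hashed and checked against its expected key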
            var blockhasher = fullblockverification ? System.Security.Cryptography.HashAlgorithm.Create(options.BlockHashAlgorithm) : null;

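            // The volumekeeper lists the files and metadata entries that are still
            // missing blocks which this particular volume can supply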
            using(var blockmarker = database.CreateBlockMarker())
            using(var volumekeeper = database.GetMissingBlockData(blocks, options.Blocksize))
            {
                foreach(var restorelist in volumekeeper.FilesWithMissingBlocks)
                {
                    var targetpath = restorelist.Path;
                    result.AddVerboseMessage("Patching file with remote data: {0}", targetpath);
                    
                    if (options.Dryrun)
                    {
                        result.AddDryrunMessage(string.Format("Would patch file with remote data: {0}", targetpath));
                    }
                    else
                    {
                        try
                        {   
                            var folderpath = m_systemIO.PathGetDirectoryName(targetpath);
                            if (!m_systemIO.DirectoryExists(folderpath))
                            {
                                result.AddWarning(string.Format("Creating missing folder {0} for file {1}", folderpath, targetpath), null);
                                m_systemIO.DirectoryCreate(folderpath);
                            }
                            
                            // TODO: Much faster if we iterate the volume and check which blocks are used,
                            // because the compressors usually prefer sequential reading
                            using(var file = m_systemIO.FileOpenReadWrite(targetpath))
                                foreach(var targetblock in restorelist.Blocks)
                                {
                                    file.Position = targetblock.Offset;
                                    var size = blocks.ReadBlock(targetblock.Key, blockbuffer);
                                    if (targetblock.Size == size)
                                    {
                                        var valid = !fullblockverification;
                                        if (!valid)
                                        {
                                            blockhasher.Initialize();
                                            var key = Convert.ToBase64String(blockhasher.ComputeHash(blockbuffer, 0, size));
                                            if (targetblock.Key == key)
                                                valid = true;
                                            else
                                                result.AddWarning(string.Format("Invalid block detected for {0}, expected hash: {1}, actual hash: {2}", targetpath, targetblock.Key, key), null);
                                        }

                                        if (valid)
                                        {
                                            file.Write(blockbuffer, 0, size);
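                                            // Record the block as restored; offset / blocksize is the block index within the file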
                                            blockmarker.SetBlockRestored(restorelist.FileID, targetblock.Offset / blocksize, targetblock.Key, size, false);
                                        }
                                    }   
                                }
                            
                            if (updateCounter++ % 20 == 0)
                                blockmarker.UpdateProcessed(result.OperationProgressUpdater);
                        }
                        catch (Exception ex)
                        {
                            result.AddWarning(string.Format("Failed to patch file: \"{0}\", message: {1}", targetpath, ex.Message), ex);
                        }
                    }
                }

                if (!options.SkipMetadata)
                {
                    foreach(var restoremetadata in volumekeeper.MetadataWithMissingBlocks)
                    {
                        var targetpath = restoremetadata.Path;
                        result.AddVerboseMessage("Recording metadata from remote data: {0}", targetpath);
                    
                        try
                        {
                            // TODO: When we support multi-block metadata this needs to deal with it
                            using(var ms = new System.IO.MemoryStream())
                            {
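                                // Reassemble the metadata blob in memory by writing each block at its recorded offset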
                                foreach(var targetblock in restoremetadata.Blocks)
                                {
                                    ms.Position = targetblock.Offset;
                                    var size = blocks.ReadBlock(targetblock.Key, blockbuffer);
                                    if (targetblock.Size == size)
                                    {
                                        ms.Write(blockbuffer, 0, size);
                                        blockmarker.SetBlockRestored(restoremetadata.FileID, targetblock.Offset / blocksize, targetblock.Key, size, true);
                                    }   
                                }

                                ms.Position = 0; 
                                metadatastorage.Add(targetpath, ms);
                                //blockmarker.RecordMetadata(restoremetadata.FileID, ms);
                            }
                        }
                        catch (Exception ex)
                        {
                            result.AddWarning(string.Format("Failed to record metadata for file: \"{0}\", message: {1}", targetpath, ex.Message), ex);
                        }
                    }
                }
                
                blockmarker.UpdateProcessed(result.OperationProgressUpdater);
                blockmarker.Commit(result);
            }
        }