private static void CreateDirectoryStructure(LocalRestoreDatabase database, Options options, RestoreResults result)
{
    // This part is not protected by try/catch as we need the target folder to exist
    if (!string.IsNullOrEmpty(options.Restorepath))
        if (!m_systemIO.DirectoryExists(options.Restorepath))
        {
            if (options.Verbose)
                result.AddVerboseMessage("Creating folder: {0}", options.Restorepath);

            if (options.Dryrun)
                result.AddDryrunMessage(string.Format("Would create folder: {0}", options.Restorepath));
            else
                m_systemIO.DirectoryCreate(options.Restorepath);
        }

    foreach (var folder in database.GetTargetFolders())
    {
        try
        {
            if (result.TaskControlRendevouz() == TaskControlState.Stop)
                return;

            if (!m_systemIO.DirectoryExists(folder))
            {
                result.FoldersRestored++;

                if (options.Verbose)
                    result.AddVerboseMessage("Creating folder: {0}", folder);

                if (options.Dryrun)
                    result.AddDryrunMessage(string.Format("Would create folder: {0}", folder));
                else
                    m_systemIO.DirectoryCreate(folder);
            }
        }
        catch (Exception ex)
        {
            result.AddWarning(string.Format("Failed to create folder: \"{0}\", message: {1}", folder, ex.Message), ex);
        }

        try
        {
            if (!options.Dryrun)
                ApplyMetadata(folder, database);
        }
        catch (Exception ex)
        {
            result.AddWarning(string.Format("Failed to set folder metadata: \"{0}\", message: {1}", folder, ex.Message), ex);
        }
    }
}
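// The verbose/dry-run/create pattern above appears twice (once for the restore root, once per
// target folder). A minimal sketch of how it could be factored out; this helper is hypothetical,
// not part of the original source, and assumes the same class scope (m_systemIO available).
private static void CreateFolderRespectingDryrun(string folder, Options options, RestoreResults result)
{
    if (options.Verbose)
        result.AddVerboseMessage("Creating folder: {0}", folder);

    if (options.Dryrun)
        result.AddDryrunMessage(string.Format("Would create folder: {0}", folder));
    else
        m_systemIO.DirectoryCreate(folder);
}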
private static void ScanForExistingTargetBlocks(LocalRestoreDatabase database, byte[] blockbuffer, System.Security.Cryptography.HashAlgorithm blockhasher, System.Security.Cryptography.HashAlgorithm filehasher, Options options, RestoreResults result)
{
    // Scan existing files for existing BLOCKS
    using (var blockmarker = database.CreateBlockMarker())
    {
        var updateCount = 0L;
        foreach (var restorelist in database.GetExistingFilesWithBlocks())
        {
            var rename = !options.Overwrite;
            var targetpath = restorelist.TargetPath;
            var targetfileid = restorelist.TargetFileID;
            var targetfilehash = restorelist.TargetHash;
            if (m_systemIO.FileExists(targetpath))
            {
                try
                {
                    if (result.TaskControlRendevouz() == TaskControlState.Stop)
                        return;

                    if (rename)
                        filehasher.Initialize();

                    using (var file = m_systemIO.FileOpenReadWrite(targetpath))
                    using (var block = new Blockprocessor(file, blockbuffer))
                        foreach (var targetblock in restorelist.Blocks)
                        {
                            var size = block.Readblock();
                            if (size <= 0)
                                break;

                            if (size == targetblock.Size)
                            {
                                var key = Convert.ToBase64String(blockhasher.ComputeHash(blockbuffer, 0, size));
                                if (key == targetblock.Hash)
                                {
                                    blockmarker.SetBlockRestored(targetfileid, targetblock.Index, key, size);
                                }
                            }

                            if (rename)
                                filehasher.TransformBlock(blockbuffer, 0, size, blockbuffer, 0);
                        }

                    if (rename)
                    {
                        filehasher.TransformFinalBlock(blockbuffer, 0, 0);
                        var filekey = Convert.ToBase64String(filehasher.Hash);
                        if (filekey == targetfilehash)
                        {
                            result.AddVerboseMessage("Target file exists and is correct version: {0}", targetpath);
                            rename = false;
                        }
                        else
                        {
                            // The new file will have none of the correct blocks,
                            // even if the scanned file had some
                            blockmarker.SetAllBlocksMissing(targetfileid);
                        }
                    }

                    if (updateCount++ % 20 == 0)
                    {
                        blockmarker.UpdateProcessed(result.OperationProgressUpdater);
                        if (result.TaskControlRendevouz() == TaskControlState.Stop)
                            return;
                    }
                }
                catch (Exception ex)
                {
                    result.AddWarning(string.Format("Failed to read target file: \"{0}\", message: {1}", targetpath, ex.Message), ex);
                    if (ex is System.Threading.ThreadAbortException)
                        throw;
                }
            }
            else
            {
                result.AddVerboseMessage("Target file does not exist: {0}", targetpath);
                rename = false;
            }

            if (rename)
            {
                // Select a new filename
                var ext = m_systemIO.PathGetExtension(targetpath) ?? "";
                if (!string.IsNullOrEmpty(ext) && !ext.StartsWith("."))
                    ext = "." + ext;

                // First we try with a simple date append, assuming that there are not many conflicts there
                var newname = m_systemIO.PathChangeExtension(targetpath, null) + "." + database.RestoreTime.ToLocalTime().ToString("yyyy-MM-dd");
                var tr = newname + ext;
                var c = 0;
                while (m_systemIO.FileExists(tr) && c < 1000)
                {
                    try
                    {
                        // If we have a file with the correct name,
                        // it is most likely the file we want
                        filehasher.Initialize();

                        string key;
                        using (var file = m_systemIO.FileOpenReadWrite(tr))
                            key = Convert.ToBase64String(filehasher.ComputeHash(file));

                        if (key == targetfilehash)
                        {
                            blockmarker.SetAllBlocksRestored(targetfileid);
                            break;
                        }
                    }
                    catch (Exception ex)
                    {
                        result.AddWarning(string.Format("Failed to read candidate restore target {0}", tr), ex);
                    }

                    tr = newname + " (" + (c++).ToString() + ")" + ext;
                }

                newname = tr;
                result.AddVerboseMessage("Target file exists and will be restored to: {0}", newname);
                database.UpdateTargetPath(targetfileid, newname);
            }
        }

        blockmarker.UpdateProcessed(result.OperationProgressUpdater);
        blockmarker.Commit(result);
    }
}
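// The rename loop above probes candidate names of the form "name.yyyy-MM-dd.ext" first, then
// "name.yyyy-MM-dd (0).ext", "name.yyyy-MM-dd (1).ext", and so on, up to 1000 attempts.
// A hedged sketch of that naming scheme as a hypothetical helper (assumes the same class
// scope for m_systemIO; pass a negative counter for the plain date-only candidate):
private static string RenameCandidate(string targetpath, DateTime restoretime, int counter)
{
    var ext = m_systemIO.PathGetExtension(targetpath) ?? "";
    if (!string.IsNullOrEmpty(ext) && !ext.StartsWith("."))
        ext = "." + ext;

    var newname = m_systemIO.PathChangeExtension(targetpath, null) + "." + restoretime.ToLocalTime().ToString("yyyy-MM-dd");
    return counter < 0 ? newname + ext : newname + " (" + counter.ToString() + ")" + ext;
}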
private void DoRun(LocalDatabase dbparent, Library.Utility.IFilter filter, RestoreResults result)
{
    // In this case, we check that the remote storage fits with the database.
    // We can then query the database and find the blocks that we need to do the restore.
    using (var database = new LocalRestoreDatabase(dbparent, m_options.Blocksize))
    using (var backend = new BackendManager(m_backendurl, m_options, result.BackendWriter, database))
    {
        database.SetResult(m_result);
        Utility.VerifyParameters(database, m_options);

        var blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
        var filehasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.FileHashAlgorithm);

        if (blockhasher == null)
            throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.BlockHashAlgorithm));
        if (!blockhasher.CanReuseTransform)
            throw new Exception(string.Format(Strings.Foresthash.InvalidCryptoSystem, m_options.BlockHashAlgorithm));

        if (filehasher == null)
            throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.FileHashAlgorithm));
        if (!filehasher.CanReuseTransform)
            throw new Exception(string.Format(Strings.Foresthash.InvalidCryptoSystem, m_options.FileHashAlgorithm));

        if (!m_options.NoBackendverification)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PreRestoreVerify);
            FilelistProcessor.VerifyRemoteList(backend, m_options, database, result.BackendWriter);
        }

        // Figure out what files are to be patched, and what blocks are needed
        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_CreateFileList);
        using (new Logging.Timer("PrepareBlockList"))
            PrepareBlockAndFileList(database, m_options, filter, result);

        // Make the entire output setup
        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_CreateTargetFolders);
        using (new Logging.Timer("CreateDirectory"))
            CreateDirectoryStructure(database, m_options, result);

        // If we are patching an existing target folder, do not touch stuff that is already updated
        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_ScanForExistingFiles);
        using (new Logging.Timer("ScanForExistingTargetBlocks"))
            ScanForExistingTargetBlocks(database, m_blockbuffer, blockhasher, filehasher, m_options, result);

        // Look for existing blocks in the original source files only
        using (new Logging.Timer("ScanForExistingSourceBlocksFast"))
#if DEBUG
        if (!m_options.NoLocalBlocks && !string.IsNullOrEmpty(m_options.Restorepath))
#else
        if (!string.IsNullOrEmpty(m_options.Restorepath))
#endif
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_ScanForLocalBlocks);
            ScanForExistingSourceBlocksFast(database, m_options, m_blockbuffer, blockhasher, result);
        }

        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
        {
            backend.WaitForComplete(database, null);
            return;
        }

        // If other local files already have the blocks we want, we use them instead of downloading
        if (m_options.PatchWithLocalBlocks)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PatchWithLocalBlocks);
            using (new Logging.Timer("PatchWithLocalBlocks"))
                ScanForExistingSourceBlocks(database, m_options, m_blockbuffer, blockhasher, result);
        }

        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
        {
            backend.WaitForComplete(database, null);
            return;
        }

        // Fill BLOCKS with remote sources
        var volumes = database.GetMissingVolumes().ToList();
        if (volumes.Count > 0)
        {
            m_result.AddMessage(string.Format("{0} remote files are required to restore", volumes.Count));
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_DownloadingRemoteFiles);
        }

        var brokenFiles = new List<string>();
        foreach (var blockvolume in new AsyncDownloader(volumes, backend))
            try
            {
                if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                {
                    backend.WaitForComplete(database, null);
                    return;
                }

                using (var tmpfile = blockvolume.TempFile)
                using (var blocks = new BlockVolumeReader(GetCompressionModule(blockvolume.Name), tmpfile, m_options))
                    PatchWithBlocklist(database, blocks, m_options, result, m_blockbuffer);
            }
            catch (Exception ex)
            {
                brokenFiles.Add(blockvolume.Name);
                result.AddError(string.Format("Failed to patch with remote file: \"{0}\", message: {1}", blockvolume.Name, ex.Message), ex);
                if (ex is System.Threading.ThreadAbortException)
                    throw;
            }

        // Reset the filehasher if it was used to verify existing files
        filehasher.Initialize();

        if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
            return;

        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_PostRestoreVerify);

        var fileErrors = 0L;
        // After all blocks in the files are restored, verify the file hash
        using (new Logging.Timer("RestoreVerification"))
            foreach (var file in database.GetFilesToRestore())
            {
                try
                {
                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                    {
                        backend.WaitForComplete(database, null);
                        return;
                    }

                    result.AddVerboseMessage("Testing restored file integrity: {0}", file.Path);

                    string key;
                    long size;
                    using (var fs = m_systemIO.FileOpenRead(file.Path))
                    {
                        size = fs.Length;
                        key = Convert.ToBase64String(filehasher.ComputeHash(fs));
                    }

                    if (key != file.Hash)
                        throw new Exception(string.Format("Failed to restore file: \"{0}\". File hash is {1}, expected hash is {2}", file.Path, key, file.Hash));

                    result.FilesRestored++;
                    result.SizeOfRestoredFiles += size;
                }
                catch (Exception ex)
                {
                    fileErrors++;
                    result.AddWarning(ex.Message, ex);
                    if (ex is System.Threading.ThreadAbortException)
                        throw;
                }
            }

        if (fileErrors > 0 && brokenFiles.Count > 0)
            m_result.AddMessage(string.Format("Failed to restore {0} files, additionally the following files failed to download, which may be the cause:{1}{2}", fileErrors, Environment.NewLine, string.Join(Environment.NewLine, brokenFiles)));

        // Drop the temp tables
        database.DropRestoreTable();
        backend.WaitForComplete(database, null);
    }

    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Restore_Complete);
    result.EndTime = DateTime.UtcNow;
}
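// For orientation, DoRun drives the restore phases in this order (taken from the calls above):
// PreRestoreVerify -> CreateFileList -> CreateTargetFolders -> ScanForExistingFiles ->
// ScanForLocalBlocks (optional) -> PatchWithLocalBlocks (optional) -> DownloadingRemoteFiles ->
// PostRestoreVerify -> Complete. Each phase polls TaskControlRendevouz(), so a Stop request
// exits cleanly after backend.WaitForComplete() has flushed pending transfers.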
private static void ScanForExistingSourceBlocks(LocalRestoreDatabase database, Options options, byte[] blockbuffer, System.Security.Cryptography.HashAlgorithm hasher, RestoreResults result)
{
    // Fill BLOCKS with data from known local source files
    using (var blockmarker = database.CreateBlockMarker())
    {
        var updateCount = 0L;
        foreach (var restorelist in database.GetFilesAndSourceBlocks())
        {
            var targetpath = restorelist.TargetPath;
            var targetfileid = restorelist.TargetFileID;
            var patched = false;
            try
            {
                if (result.TaskControlRendevouz() == TaskControlState.Stop)
                    return;

                var folderpath = m_systemIO.PathGetDirectoryName(targetpath);
                if (!options.Dryrun && !m_systemIO.DirectoryExists(folderpath))
                {
                    result.AddWarning(string.Format("Creating missing folder {0} for file {1}", folderpath, targetpath), null);
                    m_systemIO.DirectoryCreate(folderpath);
                }

                using (var file = options.Dryrun ? null : m_systemIO.FileOpenReadWrite(targetpath))
                using (var block = new Blockprocessor(file, blockbuffer))
                    foreach (var targetblock in restorelist.Blocks)
                    {
                        if (!options.Dryrun)
                            file.Position = targetblock.Offset;

                        foreach (var source in targetblock.Blocksources)
                        {
                            try
                            {
                                if (result.TaskControlRendevouz() == TaskControlState.Stop)
                                    return;

                                if (m_systemIO.FileExists(source.Path))
                                    using (var sourcefile = m_systemIO.FileOpenRead(source.Path))
                                    {
                                        sourcefile.Position = source.Offset;
                                        var size = sourcefile.Read(blockbuffer, 0, blockbuffer.Length);
                                        if (size == targetblock.Size)
                                        {
                                            var key = Convert.ToBase64String(hasher.ComputeHash(blockbuffer, 0, size));
                                            if (key == targetblock.Hash)
                                            {
                                                patched = true;
                                                if (!options.Dryrun)
                                                    file.Write(blockbuffer, 0, size);

                                                blockmarker.SetBlockRestored(targetfileid, targetblock.Index, key, targetblock.Size);
                                                break;
                                            }
                                        }
                                    }
                            }
                            catch (Exception ex)
                            {
                                result.AddWarning(string.Format("Failed to patch file: \"{0}\" with data from local file \"{1}\", message: {2}", targetpath, source.Path, ex.Message), ex);
                                if (ex is System.Threading.ThreadAbortException)
                                    throw;
                            }
                        }
                    }

                if (updateCount++ % 20 == 0)
                    blockmarker.UpdateProcessed(result.OperationProgressUpdater);
            }
            catch (Exception ex)
            {
                result.AddWarning(string.Format("Failed to patch file: \"{0}\" with local data, message: {1}", targetpath, ex.Message), ex);
            }

            if (patched)
                result.AddVerboseMessage("Target file is patched with some local data: {0}", targetpath);
            else
                result.AddVerboseMessage("Target file is not patched with any local data: {0}", targetpath);

            if (patched && options.Dryrun)
                result.AddDryrunMessage(string.Format("Would patch file with local data: {0}", targetpath));
        }

        blockmarker.UpdateProcessed(result.OperationProgressUpdater);
        blockmarker.Commit(result);
    }
}
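// A hedged sketch of the inner verify step above: a candidate source block only counts if it
// reads back at the expected size and its hash matches the target block hash. This helper is
// hypothetical, not part of the original source, and assumes the same class scope (m_systemIO):
private static bool SourceBlockMatches(string sourcepath, long offset, long expectedsize, string expectedhash, byte[] blockbuffer, System.Security.Cryptography.HashAlgorithm hasher)
{
    using (var sourcefile = m_systemIO.FileOpenRead(sourcepath))
    {
        sourcefile.Position = offset;
        var size = sourcefile.Read(blockbuffer, 0, blockbuffer.Length);
        if (size != expectedsize)
            return false;

        var key = Convert.ToBase64String(hasher.ComputeHash(blockbuffer, 0, size));
        return key == expectedhash;
    }
}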
private static void ApplyStoredMetadata(LocalRestoreDatabase database, Options options, RestoreResults result, RestoreHandlerMetadataStorage metadatastorage)
{
    foreach (var metainfo in metadatastorage.Records)
    {
        var targetpath = metainfo.Key;
        result.AddVerboseMessage("Patching metadata with remote data: {0}", targetpath);

        if (options.Dryrun)
        {
            result.AddDryrunMessage(string.Format("Would patch metadata with remote data: {0}", targetpath));
        }
        else
        {
            try
            {
                var folderpath = m_systemIO.PathGetDirectoryName(targetpath);
                if (!options.Dryrun && !m_systemIO.DirectoryExists(folderpath))
                {
                    result.AddWarning(string.Format("Creating missing folder {0} for target {1}", folderpath, targetpath), null);
                    m_systemIO.DirectoryCreate(folderpath);
                }

                ApplyMetadata(targetpath, metainfo.Value, options.RestorePermissions);
            }
            catch (Exception ex)
            {
                result.AddWarning(string.Format("Failed to apply metadata to file: \"{0}\", message: {1}", targetpath, ex.Message), ex);
            }
        }
    }
}
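// Note: the records in RestoreHandlerMetadataStorage are collected while block volumes are
// patched (see the PatchWithBlocklist overload below that takes the storage), and applied here
// in a separate pass; presumably deferring timestamps and permissions until all data writes are
// done keeps the writes themselves from clobbering freshly applied metadata.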
private static void PatchWithBlocklist(LocalRestoreDatabase database, BlockVolumeReader blocks, Options options, RestoreResults result, byte[] blockbuffer)
{
    var blocksize = options.Blocksize;
    var updateCounter = 0L;
    using (var blockmarker = database.CreateBlockMarker())
    {
        foreach (var restorelist in database.GetFilesWithMissingBlocks(blocks))
        {
            var targetpath = restorelist.Path;
            result.AddVerboseMessage("Patching file with remote data: {0}", targetpath);

            if (options.Dryrun)
            {
                result.AddDryrunMessage(string.Format("Would patch file with remote data: {0}", targetpath));
            }
            else
            {
                try
                {
                    var folderpath = m_systemIO.PathGetDirectoryName(targetpath);
                    if (!options.Dryrun && !m_systemIO.DirectoryExists(folderpath))
                    {
                        result.AddWarning(string.Format("Creating missing folder {0} for file {1}", folderpath, targetpath), null);
                        m_systemIO.DirectoryCreate(folderpath);
                    }

                    // TODO: Much faster if we iterate the volume and check what blocks are used,
                    // because the compressors usually like sequential reading
                    using (var file = m_systemIO.FileOpenReadWrite(targetpath))
                        foreach (var targetblock in restorelist.Blocks)
                        {
                            file.Position = targetblock.Offset;
                            var size = blocks.ReadBlock(targetblock.Key, blockbuffer);
                            if (targetblock.Size == size)
                            {
                                file.Write(blockbuffer, 0, size);
                                blockmarker.SetBlockRestored(restorelist.FileID, targetblock.Offset / blocksize, targetblock.Key, size);
                            }
                        }

                    if (updateCounter++ % 20 == 0)
                        blockmarker.UpdateProcessed(result.OperationProgressUpdater);
                }
                catch (Exception ex)
                {
                    result.AddWarning(string.Format("Failed to patch file: \"{0}\", message: {1}", targetpath, ex.Message), ex);
                }

                try
                {
                    ApplyMetadata(targetpath, database);
                }
                catch (Exception ex)
                {
                    result.AddWarning(string.Format("Failed to apply metadata to file: \"{0}\", message: {1}", targetpath, ex.Message), ex);
                }
            }
        }

        blockmarker.UpdateProcessed(result.OperationProgressUpdater);
        blockmarker.Commit(result);
    }
}
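// Note: this overload applies metadata inline after patching each file, while the later overload
// below (taking a RestoreHandlerMetadataStorage) defers it to ApplyStoredMetadata. The block
// index passed to SetBlockRestored is derived from the byte offset as offset / blocksize, which
// holds because every block except possibly the last is exactly blocksize bytes long.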
private static void ScanForExistingTargetBlocks(LocalRestoreDatabase database, byte[] blockbuffer, System.Security.Cryptography.HashAlgorithm blockhasher, System.Security.Cryptography.HashAlgorithm filehasher, Options options, RestoreResults result)
{
    // Scan existing files for existing BLOCKS
    using (var blockmarker = database.CreateBlockMarker())
    {
        var updateCount = 0L;
        foreach (var restorelist in database.GetExistingFilesWithBlocks())
        {
            var rename = !options.Overwrite;
            var targetpath = restorelist.TargetPath;
            var targetfileid = restorelist.TargetFileID;
            var targetfilehash = restorelist.TargetHash;
            var targetfilelength = restorelist.Length;
            if (m_systemIO.FileExists(targetpath))
            {
                try
                {
                    if (result.TaskControlRendevouz() == TaskControlState.Stop)
                        return;

                    var currentfilelength = m_systemIO.FileLength(targetpath);
                    var wasTruncated = false;

                    // Adjust the file length in overwrite mode if necessary (smaller is ok, the file will be extended during restore).
                    // We do it before scanning for blocks. This allows full verification on files that only need to
                    // be truncated (i.e. log files that are appended to over time).
                    if (!rename && currentfilelength > targetfilelength)
                    {
                        var currentAttr = m_systemIO.GetFileAttributes(targetpath);
                        if ((currentAttr & System.IO.FileAttributes.ReadOnly) != 0) // clear readonly attribute
                        {
                            if (options.Dryrun)
                                result.AddDryrunMessage(string.Format("Would reset read-only attribute on file: {0}", targetpath));
                            else
                                m_systemIO.SetFileAttributes(targetpath, currentAttr & ~System.IO.FileAttributes.ReadOnly);
                        }

                        if (options.Dryrun)
                            result.AddDryrunMessage(string.Format("Would truncate file '{0}' to length of {1:N0} bytes", targetpath, targetfilelength));
                        else
                        {
                            using (var file = m_systemIO.FileOpenWrite(targetpath))
                                file.SetLength(targetfilelength);
                            currentfilelength = targetfilelength;
                        }
                        wasTruncated = true;
                    }

                    // If the file size does not match and we have to rename on conflict,
                    // the whole scan can be skipped here because all blocks have to be restored anyway.
                    // For the other cases, we will check block and file hashes and look for blocks
                    // to be restored and files that can already be verified.
                    if (!rename || currentfilelength == targetfilelength)
                    {
                        // A file hash for verification is only necessary if the file has exactly
                        // the wanted size, so we have a chance to already mark the file as data-verified.
                        bool calcFileHash = (currentfilelength == targetfilelength);
                        if (calcFileHash)
                            filehasher.Initialize();

                        using (var file = m_systemIO.FileOpenRead(targetpath))
                        using (var block = new Blockprocessor(file, blockbuffer))
                            foreach (var targetblock in restorelist.Blocks)
                            {
                                var size = block.Readblock();
                                if (size <= 0)
                                    break;

                                //TODO: Handle metadata

                                bool blockhashmatch = false;
                                if (size == targetblock.Size)
                                {
                                    // Parallelize file hash calculation on rename. Running read-only on the same array should not cause conflicts or races.
                                    // Actually, in future always calculate the file hash and mark the file data as already verified.
                                    System.Threading.Tasks.Task calcFileHashTask = null;
                                    if (calcFileHash)
                                        calcFileHashTask = System.Threading.Tasks.Task.Run(
                                            () => filehasher.TransformBlock(blockbuffer, 0, size, blockbuffer, 0));

                                    var key = Convert.ToBase64String(blockhasher.ComputeHash(blockbuffer, 0, size));

                                    if (calcFileHashTask != null)
                                        calcFileHashTask.Wait(); // wait because blockbuffer will be overwritten

                                    if (key == targetblock.Hash)
                                    {
                                        blockmarker.SetBlockRestored(targetfileid, targetblock.Index, key, size, false);
                                        blockhashmatch = true;
                                    }
                                }

                                if (calcFileHash && !blockhashmatch) // the file hash will not be necessary anymore
                                {
                                    filehasher.TransformFinalBlock(blockbuffer, 0, 0); // so a later Initialize will not throw
                                    calcFileHash = false;
                                    if (rename) // file does not match, so stop scanning
                                        break;
                                }
                            }

                        bool fullfilehashmatch = false;
                        if (calcFileHash) // now check if the files are identical
                        {
                            filehasher.TransformFinalBlock(blockbuffer, 0, 0);
                            var filekey = Convert.ToBase64String(filehasher.Hash);
                            fullfilehashmatch = (filekey == targetfilehash);
                        }

                        if (!rename && !fullfilehashmatch && !wasTruncated) // reset the read-only attribute (if set) to overwrite
                        {
                            var currentAttr = m_systemIO.GetFileAttributes(targetpath);
                            if ((currentAttr & System.IO.FileAttributes.ReadOnly) != 0)
                            {
                                if (options.Dryrun)
                                    result.AddDryrunMessage(string.Format("Would reset read-only attribute on file: {0}", targetpath));
                                else
                                    m_systemIO.SetFileAttributes(targetpath, currentAttr & ~System.IO.FileAttributes.ReadOnly);
                            }
                        }

                        if (fullfilehashmatch)
                        {
                            //TODO: Check metadata to trigger rename? If metadata changed, it will still be restored for the file in-place.
                            blockmarker.SetFileDataVerified(targetfileid);
                            result.AddVerboseMessage("Target file exists{1} and is correct version: {0}", targetpath, wasTruncated ? " (but was truncated)" : "");
                            rename = false;
                        }
                        else if (rename)
                        {
                            // The new file will have none of the correct blocks,
                            // even if the scanned file had some
                            blockmarker.SetAllBlocksMissing(targetfileid);
                        }
                    }

                    if ((++updateCount) % 20 == 0)
                    {
                        blockmarker.UpdateProcessed(result.OperationProgressUpdater);
                        if (result.TaskControlRendevouz() == TaskControlState.Stop)
                            return;
                    }
                }
                catch (Exception ex)
                {
                    result.AddWarning(string.Format("Failed to read target file: \"{0}\", message: {1}", targetpath, ex.Message), ex);
                    if (ex is System.Threading.ThreadAbortException)
                        throw;
                }
            }
            else
            {
                result.AddVerboseMessage("Target file does not exist: {0}", targetpath);
                rename = false;
            }

            if (rename)
            {
                // Select a new filename
                var ext = m_systemIO.PathGetExtension(targetpath) ?? "";
                if (!string.IsNullOrEmpty(ext) && !ext.StartsWith("."))
                    ext = "." + ext;

                // First we try with a simple date append, assuming that there are not many conflicts there
                var newname = m_systemIO.PathChangeExtension(targetpath, null) + "." + database.RestoreTime.ToLocalTime().ToString("yyyy-MM-dd", System.Globalization.CultureInfo.InvariantCulture);
                var tr = newname + ext;
                var c = 0;
                while (m_systemIO.FileExists(tr) && c < 1000)
                {
                    try
                    {
                        // If we have a file with the correct name,
                        // it is most likely the file we want
                        filehasher.Initialize();

                        string key;
                        using (var file = m_systemIO.FileOpenRead(tr))
                            key = Convert.ToBase64String(filehasher.ComputeHash(file));

                        if (key == targetfilehash)
                        {
                            //TODO: Also needs a metadata check to make the correct decision.
                            //      We stick to the policy of restoring metadata in place if the data is ok, so the metadata block may be restored.
                            blockmarker.SetAllBlocksRestored(targetfileid, false);
                            blockmarker.SetFileDataVerified(targetfileid);
                            break;
                        }
                    }
                    catch (Exception ex)
                    {
                        result.AddWarning(string.Format("Failed to read candidate restore target {0}", tr), ex);
                    }

                    tr = newname + " (" + (c++).ToString() + ")" + ext;
                }

                newname = tr;
                result.AddVerboseMessage("Target file exists and will be restored to: {0}", newname);
                database.UpdateTargetPath(targetfileid, newname);
            }
        }

        blockmarker.UpdateProcessed(result.OperationProgressUpdater);
        blockmarker.Commit(result);
    }
}
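// A hedged sketch of the hash-overlap trick used above: the streaming file hash runs on a worker
// task while the per-block hash runs on the calling thread. Both only read from blockbuffer, and
// the Wait() guarantees completion before the buffer is refilled by the next Readblock(). This
// helper is hypothetical, not part of the original source:
private static string HashBlockAndFeedFileHash(byte[] blockbuffer, int size, System.Security.Cryptography.HashAlgorithm blockhasher, System.Security.Cryptography.HashAlgorithm filehasher)
{
    var filehashtask = System.Threading.Tasks.Task.Run(
        () => filehasher.TransformBlock(blockbuffer, 0, size, blockbuffer, 0));

    var key = Convert.ToBase64String(blockhasher.ComputeHash(blockbuffer, 0, size));

    filehashtask.Wait(); // must finish before blockbuffer is reused
    return key;
}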
private static void PatchWithBlocklist(LocalRestoreDatabase database, BlockVolumeReader blocks, Options options, RestoreResults result, byte[] blockbuffer, RestoreHandlerMetadataStorage metadatastorage)
{
    var blocksize = options.Blocksize;
    var updateCounter = 0L;
    var fullblockverification = options.FullBlockVerification;
    var blockhasher = fullblockverification ? System.Security.Cryptography.HashAlgorithm.Create(options.BlockHashAlgorithm) : null;

    using (var blockmarker = database.CreateBlockMarker())
    using (var volumekeeper = database.GetMissingBlockData(blocks, options.Blocksize))
    {
        foreach (var restorelist in volumekeeper.FilesWithMissingBlocks)
        {
            var targetpath = restorelist.Path;
            result.AddVerboseMessage("Patching file with remote data: {0}", targetpath);

            if (options.Dryrun)
            {
                result.AddDryrunMessage(string.Format("Would patch file with remote data: {0}", targetpath));
            }
            else
            {
                try
                {
                    var folderpath = m_systemIO.PathGetDirectoryName(targetpath);
                    if (!options.Dryrun && !m_systemIO.DirectoryExists(folderpath))
                    {
                        result.AddWarning(string.Format("Creating missing folder {0} for file {1}", folderpath, targetpath), null);
                        m_systemIO.DirectoryCreate(folderpath);
                    }

                    // TODO: Much faster if we iterate the volume and check what blocks are used,
                    // because the compressors usually like sequential reading
                    using (var file = m_systemIO.FileOpenReadWrite(targetpath))
                        foreach (var targetblock in restorelist.Blocks)
                        {
                            file.Position = targetblock.Offset;
                            var size = blocks.ReadBlock(targetblock.Key, blockbuffer);
                            if (targetblock.Size == size)
                            {
                                var valid = !fullblockverification;
                                if (!valid)
                                {
                                    blockhasher.Initialize();
                                    var key = Convert.ToBase64String(blockhasher.ComputeHash(blockbuffer, 0, size));
                                    if (targetblock.Key == key)
                                        valid = true;
                                    else
                                        result.AddWarning(string.Format("Invalid block detected for {0}, expected hash: {1}, actual hash: {2}", targetpath, targetblock.Key, key), null);
                                }

                                if (valid)
                                {
                                    file.Write(blockbuffer, 0, size);
                                    blockmarker.SetBlockRestored(restorelist.FileID, targetblock.Offset / blocksize, targetblock.Key, size, false);
                                }
                            }
                        }

                    if (updateCounter++ % 20 == 0)
                        blockmarker.UpdateProcessed(result.OperationProgressUpdater);
                }
                catch (Exception ex)
                {
                    result.AddWarning(string.Format("Failed to patch file: \"{0}\", message: {1}", targetpath, ex.Message), ex);
                }
            }
        }

        if (!options.SkipMetadata)
        {
            foreach (var restoremetadata in volumekeeper.MetadataWithMissingBlocks)
            {
                var targetpath = restoremetadata.Path;
                result.AddVerboseMessage("Recording metadata from remote data: {0}", targetpath);

                try
                {
                    // TODO: When we support multi-block metadata this needs to deal with it
                    using (var ms = new System.IO.MemoryStream())
                    {
                        foreach (var targetblock in restoremetadata.Blocks)
                        {
                            ms.Position = targetblock.Offset;
                            var size = blocks.ReadBlock(targetblock.Key, blockbuffer);
                            if (targetblock.Size == size)
                            {
                                ms.Write(blockbuffer, 0, size);
                                blockmarker.SetBlockRestored(restoremetadata.FileID, targetblock.Offset / blocksize, targetblock.Key, size, true);
                            }
                        }

                        ms.Position = 0;
                        metadatastorage.Add(targetpath, ms);
                        //blockmarker.RecordMetadata(restoremetadata.FileID, ms);
                    }
                }
                catch (Exception ex)
                {
                    result.AddWarning(string.Format("Failed to record metadata for file: \"{0}\", message: {1}", targetpath, ex.Message), ex);
                }
            }
        }

        blockmarker.UpdateProcessed(result.OperationProgressUpdater);
        blockmarker.Commit(result);
    }
}
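// A minimal sketch of the optional full-block-verification check above: the downloaded block is
// re-hashed and compared against the key it was requested under before being written to the
// target file. Hypothetical helper, not part of the original source:
private static bool DownloadedBlockIsValid(byte[] blockbuffer, int size, string expectedkey, System.Security.Cryptography.HashAlgorithm blockhasher)
{
    blockhasher.Initialize();
    var key = Convert.ToBase64String(blockhasher.ComputeHash(blockbuffer, 0, size));
    return key == expectedkey;
}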