/// <summary>
/// Initiates creating a content/signature pair.
/// Resets the per-run state, sets up a snapshot service (with a plain non-snapshot
/// fallback), optionally collects paths via NTFS USN journals, and builds the
/// folder and symlink diff lists against the state of the previous backup.
/// </summary>
/// <param name="full">True if the set is a full backup, false if it is incremental</param>
/// <param name="options">Any setup options to use</param>
public void InitiateMultiPassDiff(bool full, Options options)
{
    // A full backup starts from an empty "previous state"; an incremental run
    // keeps the old folders/signatures/symlinks so they can be diffed against.
    if (full)
    {
        m_oldFolders = new Dictionary<string, DateTime>();
        m_oldSignatures = new Dictionary<string, ArchiveWrapper>();
        m_oldSymlinks = new Dictionary<string, string>();
    }

    // Fresh accumulators for this run
    m_newfiles = new Dictionary<string, string>();
    m_modifiedFiles = new Dictionary<string, string>();
    m_deletedfiles = new List<string>();
    m_newfolders = new List<KeyValuePair<string, DateTime>>();
    m_updatedfolders = new List<KeyValuePair<string, DateTime>>();
    m_deletedfolders = new List<string>();
    m_checkedUnchangedFiles = new List<string>();
    m_lastPartialFile = null;
    m_openfilepolicy = Options.OpenFileStrategy.Ignore;

    try
    {
        if (options.SnapShotStrategy != Options.OptimizationStrategy.Off)
            m_snapshot = XervBackup.Library.Snapshots.SnapshotUtility.CreateSnapshot(m_sourcefolder, options.RawOptions);
    }
    catch (Exception ex)
    {
        // "Required" propagates the failure, "On" logs a warning, other strategies fail silently
        if (options.SnapShotStrategy == Options.OptimizationStrategy.Required)
            throw;
        else if (options.SnapShotStrategy == Options.OptimizationStrategy.On)
        {
            if (m_stat != null)
                m_stat.LogWarning(string.Format(Strings.RSyncDir.SnapshotFailedError, ex.ToString()), ex);
        }
    }

    //Failsafe, just use a plain implementation
    if (m_snapshot == null)
    {
        m_snapshot = Utility.Utility.IsClientLinux ?
            (Library.Snapshots.ISnapshotService)new XervBackup.Library.Snapshots.NoSnapshotLinux(m_sourcefolder, options.RawOptions)
            : (Library.Snapshots.ISnapshotService)new XervBackup.Library.Snapshots.NoSnapshotWindows(m_sourcefolder, options.RawOptions);

        // The open-file policy is only consulted when no real snapshot is available
        m_openfilepolicy = options.OpenFilePolicy;
    }

    Dictionary<string, Snapshots.USNHelper> usnHelpers = null;
    // NOTE(review): "unchanged" is only filled by the disabled issue-332 code below,
    // so the skip-unchanged branch further down currently never removes anything.
    List<string> unchanged = new List<string>();
    m_unproccesed = new PathCollector(m_snapshot, options.SymlinkPolicy, options.FileAttributeFilter, m_filter, m_stat);

    try
    {
        if (options.UsnStrategy != Options.OptimizationStrategy.Off)
        {
            // USN journals do not exist on Linux; only "Auto" may silently degrade there
            if (Utility.Utility.IsClientLinux && options.UsnStrategy != Options.OptimizationStrategy.Auto)
                throw new Exception(Strings.RSyncDir.UsnNotSupportedOnLinuxError);

            /* if (options.DisableUSNDiffCheck) m_lastUSN = null; */

            // One USN helper per volume root (several source folders may share a volume)
            usnHelpers = new Dictionary<string, XervBackup.Library.Snapshots.USNHelper>(Utility.Utility.ClientFilenameStringComparer);
            foreach (string s in m_sourcefolder)
            {
                string rootFolder = System.IO.Path.GetPathRoot(s);
                if (!usnHelpers.ContainsKey(rootFolder))
                    try
                    {
                        usnHelpers[rootFolder] = new XervBackup.Library.Snapshots.USNHelper(rootFolder);
                    }
                    catch (Exception ex)
                    {
                        if (options.UsnStrategy == Options.OptimizationStrategy.Required)
                            throw;
                        else if (options.UsnStrategy == Options.OptimizationStrategy.On)
                        {
                            if (m_stat != null)
                                m_stat.LogWarning(string.Format(Strings.RSyncDir.UsnFailedError, ex.ToString()), ex);
                        }
                    }

                if (usnHelpers.ContainsKey(rootFolder))
                {
                    //This code is broken, see issue 332:
                    //http://code.google.com/p/XervBackup/issues/detail?id=332
                    //(The disabled block used the previously recorded USN journal id and
                    // number to enumerate only the changed entries and to collect the
                    // unchanged ones; until it is fixed, a full enumeration is always done.)
                    {
                        usnHelpers[rootFolder].EnumerateFilesAndFolders(s, m_unproccesed.Callback);
                    }
                }
            }

            // Record the journal id and USN per volume so the next run can detect changes
            if (usnHelpers.Count > 0)
            {
                m_currentUSN = new USNRecord();
                foreach (KeyValuePair<string, Snapshots.USNHelper> kx in usnHelpers)
                    m_currentUSN.Add(kx.Key, kx.Value.JournalID, kx.Value.USN);
            }
        }
    }
    catch (Exception ex)
    {
        if (options.UsnStrategy == Options.OptimizationStrategy.Required)
            throw;
        else if (options.UsnStrategy == Options.OptimizationStrategy.On)
        {
            if (m_stat != null)
                m_stat.LogWarning(string.Format(Strings.RSyncDir.UsnFailedError, ex.ToString()), ex);
        }

        //If we get here, something went really wrong with USN, so we disable it
        m_currentUSN = null;
        m_unproccesed = new PathCollector(m_snapshot, options.SymlinkPolicy, options.FileAttributeFilter, m_filter, m_stat);
        unchanged = null;
    }
    finally
    {
        // Helpers are only needed for enumeration; release them regardless of outcome
        if (usnHelpers != null)
            foreach (Snapshots.USNHelper h in usnHelpers.Values)
                try
                {
                    h.Dispose();
                }
                catch (Exception ex)
                {
                    if (m_stat != null)
                        m_stat.LogWarning(string.Format(Strings.RSyncDir.UsnDisposeFailedWarning, ex.ToString()), ex);
                }
    }

    if (m_currentUSN == null)
    {
        // USN was not used; walk the entire source tree instead
        m_snapshot.EnumerateFilesAndFolders(m_unproccesed.Callback);
    }
    else
    {
        //Skip all items that we know are unchanged
        foreach (string x in unchanged)
        {
            string relpath = GetRelativeName(x);
            m_oldSignatures.Remove(relpath);
            m_oldFolders.Remove(relpath);
            m_oldSymlinks.Remove(relpath);
        }

        //If some folders did not support USN, add their files now
        foreach (string s in m_sourcefolder)
            if (!m_currentUSN.ContainsKey(System.IO.Path.GetPathRoot(s)))
                m_snapshot.EnumerateFilesAndFolders(s, m_unproccesed.Callback);
    }

    m_totalfiles = m_unproccesed.Files.Count;
    m_isfirstmultipass = true;

    if (options.ExcludeEmptyFolders)
    {
        //We remove the folders that have no files.
        //It would be more optimal to exclude them from the list before this point,
        // but that would require rewriting of the snapshots

        //We can't rely on the order of the folders, so we sort them to get the shortest folder names first
        m_unproccesed.Folders.Sort(Utility.Utility.ClientFilenameStringComparer);
        //We can't rely on the order of the files either, but sorting them allows us to use a O=log(n) search rather than O=n
        m_unproccesed.Files.Sort(Utility.Utility.ClientFilenameStringComparer);

        for (int i = 0; i < m_unproccesed.Folders.Count; i++)
        {
            string folder = m_unproccesed.Folders[i];
            int ix = m_unproccesed.Files.BinarySearch(folder, Utility.Utility.ClientFilenameStringComparer);
            if (ix >= 0)
                continue; //Should not happen, means that a file has the same name as a folder

            //Get the next index larger than the foldername
            ix = ~ix;

            if (ix >= m_unproccesed.Files.Count)
            {
                //No files matched
                m_unproccesed.Folders.RemoveAt(i);
                i--;
            }
            else
            {
                //If the element does not start with the foldername, no files from the folder are included
                if (!m_unproccesed.Files[ix].StartsWith(folder, Utility.Utility.ClientFilenameStringComparision))
                {
                    //Speedup, remove all subfolders as well without performing binary searches
                    while (i < m_unproccesed.Folders.Count && m_unproccesed.Folders[i].StartsWith(folder))
                        m_unproccesed.Folders.RemoveAt(i);

                    //We have removed at least one, so adjust the loop counter
                    i--;
                }
            }
        }
    }

    //Build folder diffs
    foreach (string s in m_unproccesed.Folders)
    {
        try
        {
            string relpath = GetRelativeName(s);
            if (relpath.Trim().Length != 0)
            {
                DateTime lastWrite = m_snapshot.GetLastWriteTime(s).ToUniversalTime();

                //Cut off as we only have seconds stored
                lastWrite = new DateTime(lastWrite.Year, lastWrite.Month, lastWrite.Day, lastWrite.Hour, lastWrite.Minute, lastWrite.Second, DateTimeKind.Utc);

                if (!m_oldFolders.ContainsKey(relpath))
                    m_newfolders.Add(new KeyValuePair<string, DateTime>(relpath, lastWrite));
                else
                {
                    if (m_oldFolders[relpath] != lastWrite)
                        m_updatedfolders.Add(new KeyValuePair<string, DateTime>(relpath, lastWrite));
                    // Any folder left in m_oldFolders after this loop was deleted
                    m_oldFolders.Remove(relpath);
                }
            }
        }
        catch (Exception ex)
        {
            m_unproccesed.Errors.Add(s);
            // NOTE(review): m_stat is used without a null check here, unlike the other
            // call sites in this method — confirm m_stat cannot be null at this point.
            m_stat.LogError(string.Format(Strings.RSyncDir.FolderModificationTimeReadError, s, ex.Message), ex);
        }
    }

    m_unproccesed.Folders.Clear();

    // Folders that were not re-discovered (and not hidden by an error) are deletions
    foreach (string s in m_oldFolders.Keys)
        if (!m_unproccesed.IsAffectedByError(s))
            m_deletedfolders.Add(s);

    //Build symlink diffs
    if (m_oldSymlinks.Count > 0)
    {
        // Iterate backwards because unchanged symlinks are removed from the list
        for (int i = m_unproccesed.Symlinks.Count - 1; i >= 0; i--)
        {
            string s = m_unproccesed.Symlinks[i].Key;
            try
            {
                string relpath = GetRelativeName(s);
                if (relpath.Trim().Length != 0)
                {
                    string oldLink;
                    if (m_oldSymlinks.TryGetValue(relpath, out oldLink))
                    {
                        m_oldSymlinks.Remove(relpath);
                        // Unchanged target -> nothing to back up for this symlink
                        if (string.Equals(oldLink, m_unproccesed.Symlinks[i].Value, Utility.Utility.ClientFilenameStringComparision))
                        {
                            m_unproccesed.Symlinks.RemoveAt(i);
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                m_unproccesed.Errors.Add(s);
                // NOTE(review): same un-guarded m_stat use as in the folder loop above.
                m_stat.LogError(string.Format(Strings.RSyncDir.SymlinkReadError, s, ex.Message), ex);
            }
        }
    }

    // Symlinks that no longer exist in the source are recorded as deleted files
    m_deletedfiles.AddRange(m_oldSymlinks.Keys);
    m_oldSymlinks.Clear();

    m_sortedfilelist = options.SortedFilelist;
    if (m_sortedfilelist)
        m_unproccesed.Files.Sort(Utility.Utility.ClientFilenameStringComparer);
}
/// <summary>
/// Releases all resources held by this instance: the pending partial file,
/// any opened patch archives, the temporary files backing partial deltas and
/// the snapshot service. Before releasing, the collected counters are copied
/// into the attached statistics object, if it is a <c>BackupStatistics</c>.
/// </summary>
public void Dispose()
{
    //A file that was only partially written may still hold an open stream
    if (m_lastPartialFile != null)
    {
        try { m_lastPartialFile.Dispose(); }
        catch { }
        m_lastPartialFile = null;
    }

    //Close every patch archive that was opened
    if (m_patches != null)
    {
        foreach (Library.Interface.ICompression patchArchive in m_patches)
            try { patchArchive.Dispose(); }
            catch { }
        m_patches = null;
    }

    //Delete the temporary files that back the partial deltas
    if (m_partialDeltas != null)
    {
        foreach (Utility.TempFile tempFile in m_partialDeltas.Values)
            try
            {
                if (tempFile != null)
                    tempFile.Dispose();
            }
            catch { }
        m_partialDeltas = null;
    }

    //Hand the collected counters over to the statistics object, if one is attached
    if (m_stat is BackupStatistics)
    {
        BackupStatistics stats = m_stat as BackupStatistics;

        if (m_deletedfiles != null)
            stats.DeletedFiles = m_deletedfiles.Count;
        if (m_deletedfolders != null)
            stats.DeletedFolders = m_deletedfolders.Count;

        stats.ModifiedFiles = m_diffedfiles;
        stats.AddedFiles = m_addedfiles;
        stats.ExaminedFiles = m_examinedfiles;
        stats.OpenedFiles = m_filesopened;
        stats.SizeOfModifiedFiles = m_diffedfilessize;
        stats.SizeOfAddedFiles = m_addedfilessize;
        stats.SizeOfExaminedFiles = m_examinedfilesize;

        if (m_unproccesed != null && m_unproccesed.Files != null)
            stats.UnprocessedFiles = m_unproccesed.Files.Count;
        if (m_newfolders != null)
            stats.AddedFolders = m_newfolders.Count;
    }

    //Release the snapshot last
    if (m_snapshot != null)
    {
        m_snapshot.Dispose();
        m_snapshot = null;
    }
}
/// <summary>
/// Appends a file to the content archive, watching the content archive file size.
/// Does not record anything in either content or signature volumes.
/// Returns the partial file entry if the volume size was exceeded.
/// Returns null if the file was written entirely.
/// </summary>
/// <param name="entry">The entry that describes the partial file</param>
/// <param name="contentfile">The content archive file</param>
/// <param name="volumesize">The max allowed volumesize</param>
/// <returns>The partial file entry if the volume size was exceeded. Returns null if the file was written entirely.</returns>
private PartialFileEntry WritePossiblePartialInternal(PartialFileEntry entry, Library.Interface.ICompression contentfile, long volumesize)
{
    //Copy in chunks of 1kb, re-checking the total archive size after every write
    byte[] buffer = new byte[1024];

    using (System.IO.Stream target = contentfile.CreateFile(entry.relativeName, entry.LastWriteTime))
    {
        int bytesRead = entry.Stream.Read(buffer, 0, buffer.Length);
        while (bytesRead != 0)
        {
            target.Write(buffer, 0, bytesRead);

            //Stop before another full chunk could push the archive past the limit;
            //the entry's stream keeps its position so the copy can be resumed later
            if (contentfile.Size + contentfile.FlushBufferSize + entry.ExtraSize + buffer.Length > volumesize)
                return entry;

            bytesRead = entry.Stream.Read(buffer, 0, buffer.Length);
        }
    }

    //The entire file was written
    return null;
}
/// <summary>
/// Appends a file to the content and signature archives, watching the content archive file size.
/// Writes an INCOMPLETE_FILE marker record if the file did not fit, or a COMPLETED_FILE
/// marker record if a previously partial file was finished in this volume.
/// Returns the partial file entry if the volume size was exceeded.
/// Returns null if the file was written entirely.
/// </summary>
/// <param name="entry">The entry that describes the partial file</param>
/// <param name="contentfile">The content archive file</param>
/// <param name="signaturefile">The signature archive file</param>
/// <param name="volumesize">The max allowed volumesize</param>
/// <returns>The partial file entry if the volume size was exceeded. Returns null if the file was written entirely.</returns>
private PartialFileEntry WritePossiblePartial(PartialFileEntry entry, Library.Interface.ICompression contentfile, Library.Interface.ICompression signaturefile, long volumesize)
{
    // A non-zero position means this entry was already partially written in a previous volume
    long startPos = entry.Stream.Position;

    //Protect against writing this file if there is not enough space to hold the INCOMPLETE_FILE
    if (startPos == 0 && contentfile.Size + contentfile.FlushBufferSize + (entry.ExtraSize * 2) > volumesize)
        return entry;

    PartialFileEntry pe = WritePossiblePartialInternal(entry, contentfile, volumesize);

    if (pe != null)
    {
        //The record is (still) partial
        string[] tmplines = new PartialEntryRecord(entry.relativeName, startPos, entry.Stream.Position - startPos, entry.Stream.Length).Serialize();
        contentfile.WriteAllLines(INCOMPLETE_FILE, tmplines);
        signaturefile.WriteAllLines(INCOMPLETE_FILE, tmplines);

        //If we are debugging, this can be nice to have
        Logging.Log.WriteMessage(string.Format(Strings.RSyncDir.PartialFileAddedLogMessage, entry.relativeName, startPos), XervBackup.Library.Logging.LogMessageType.Information);
    }
    else
    {
        //If the file was partial before, mark the file as completed
        if (startPos != 0)
        {
            string[] tmplines = new PartialEntryRecord(entry.relativeName, startPos, entry.Stream.Position - startPos, entry.Stream.Length).Serialize();
            contentfile.WriteAllLines(COMPLETED_FILE, tmplines);
            signaturefile.WriteAllLines(COMPLETED_FILE, tmplines);
        }

        //Add signature AFTER content is completed.
        //If content is present, it is restoreable, if signature is missing, file will be backed up on next run
        //If signature is present, but not content, the entire differential sequence will be unable to recover the file
        if (!entry.DumpSignature(signaturefile))
        {
            // The signature no longer matches the data that was read
            if (m_stat != null)
                m_stat.LogWarning(string.Format(Strings.RSyncDir.FileChangedWhileReadWarning, entry.fullname), null);
        }

        // The entry owns the source stream; fully written entries release it here
        entry.Dispose();
    }
    return pe;
}
/// <summary>
/// Appends a file to the content and signature archives.
/// If the file existed, a delta file is added, otherwise the entire file is added.
/// </summary>
/// <param name="fs">The file to add</param>
/// <param name="s">The full name of the file</param>
/// <param name="signature">The signature stream to add</param>
/// <param name="contentfile">The content archive file</param>
/// <param name="signaturefile">The signature archive file</param>
/// <param name="volumesize">The max size of the volume</param>
/// <param name="lastWrite">The time the source file was last written</param>
/// <returns>The current size of the content archive</returns>
private long AddFileToCompression(System.IO.Stream fs, string s, System.IO.Stream signature, Library.Interface.ICompression contentfile, Library.Interface.ICompression signaturefile, long volumesize, DateTime lastWrite)
{
    fs.Position = 0;
    string relpath = GetRelativeName(s);

    if (m_modifiedFiles.ContainsKey(s))
    {
        //Existing file, write the delta file
        string target = System.IO.Path.Combine(DELTA_ROOT, relpath);
        string signaturepath = System.IO.Path.Combine(DELTA_SIGNATURE_ROOT, relpath);

        // The delta is generated against the signature recorded by the previous backup
        using (System.IO.Stream sigfs = m_oldSignatures[relpath].OpenRead(relpath))
        {
            long lbefore = contentfile.Size;

            Utility.TempFileStream deltaTemp = null;
            try
            {
                deltaTemp = new XervBackup.Library.Utility.TempFileStream();
                SharpRSync.Interface.GenerateDelta(sigfs, fs, deltaTemp);
                deltaTemp.Position = 0;

                // On success the PartialFileEntry takes ownership of deltaTemp
                m_lastPartialFile = WritePossiblePartial(new PartialFileEntry(deltaTemp, target, s, 0, signature, signaturepath, lastWrite), contentfile, signaturefile, volumesize);
            }
            catch
            {
                // Clean up the temp file ourselves if ownership was never transferred
                if (deltaTemp != null)
                    deltaTemp.Dispose();
                throw;
            }

            m_diffsize += contentfile.Size - lbefore;
            m_diffedfilessize += fs.Length;
            m_diffedfiles++;
        }

        // The file is now handled; remove it from the pending state
        m_modifiedFiles.Remove(s);
        m_oldSignatures.Remove(relpath);
    }
    else
    {
        //New file, write as content
        string signaturepath = System.IO.Path.Combine(CONTENT_SIGNATURE_ROOT, relpath);
        string target = System.IO.Path.Combine(CONTENT_ROOT, relpath);

        long size = fs.Length;
        m_lastPartialFile = WritePossiblePartial(new PartialFileEntry(fs, target, s, 0, signature, signaturepath, lastWrite), contentfile, signaturefile, volumesize);

        m_addedfiles++;
        m_addedfilessize += size;

        m_newfiles.Remove(s);
    }
    return contentfile.Size;
}
/// <summary>
/// Creates a signature/content pair.
/// Returns true when all files are processed.
/// Returns false if there are still files to process.
/// This method will only return false if the volumesize is exceeded.
/// </summary>
/// <param name="signaturefile">The signature archive file</param>
/// <param name="contentfile">The content archive file</param>
/// <param name="volumesize">The max size of this volume</param>
/// <returns>False if there are still files to process, true if all files are processed</returns>
public bool MakeMultiPassDiff(Library.Interface.ICompression signaturefile, Library.Interface.ICompression contentfile, long volumesize)
{
    if (m_unproccesed == null)
        throw new Exception(Strings.RSyncDir.MultipassUsageError);

    // Used to pick the next file at random when the file list is not sorted
    Random r = new Random();
    long totalSize = 0;

    //Insert the marker file
    contentfile.CreateFile(UTC_TIME_MARKER).Dispose();
    signaturefile.CreateFile(UTC_TIME_MARKER).Dispose();

    if (m_isfirstmultipass)
    {
        //We write these files to the very first volume
        if (m_deletedfolders.Count > 0)
        {
            signaturefile.WriteAllLines(DELETED_FOLDERS, m_deletedfolders.ToArray());
            contentfile.WriteAllLines(DELETED_FOLDERS, m_deletedfolders.ToArray());
        }

        if (m_newfolders.Count > 0)
        {
            string[] folders = new string[m_newfolders.Count];
            string[] timestamps = new string[m_newfolders.Count];

            // Timestamps are stored as whole seconds since the epoch
            for (int i = 0; i < m_newfolders.Count; i++)
            {
                folders[i] = m_newfolders[i].Key;
                timestamps[i] = ((long)((m_newfolders[i].Value - Utility.Utility.EPOCH).TotalSeconds)).ToString();
            }

            folders = FilenamesToPlatformIndependant(folders);

            signaturefile.WriteAllLines(ADDED_FOLDERS, folders);
            signaturefile.WriteAllLines(ADDED_FOLDERS_TIMESTAMPS, timestamps);
            contentfile.WriteAllLines(ADDED_FOLDERS, folders);
            contentfile.WriteAllLines(ADDED_FOLDERS_TIMESTAMPS, timestamps);
        }

        if (m_updatedfolders.Count > 0)
        {
            string[] folders = new string[m_updatedfolders.Count];
            string[] timestamps = new string[m_updatedfolders.Count];
            for (int i = 0; i < m_updatedfolders.Count; i++)
            {
                folders[i] = m_updatedfolders[i].Key;
                timestamps[i] = ((long)((m_updatedfolders[i].Value - Utility.Utility.EPOCH).TotalSeconds)).ToString();
            }

            folders = FilenamesToPlatformIndependant(folders);

            signaturefile.WriteAllLines(UPDATED_FOLDERS, folders);
            signaturefile.WriteAllLines(UPDATED_FOLDERS_TIMESTAMPS, timestamps);
            contentfile.WriteAllLines(UPDATED_FOLDERS, folders);
            contentfile.WriteAllLines(UPDATED_FOLDERS_TIMESTAMPS, timestamps);
        }

        m_isfirstmultipass = false;
    }

    //Last update was a looong time ago
    DateTime nextProgressEvent = DateTime.Now.AddYears(-1);

    // A file left half-written by the previous volume must be continued first
    if (m_lastPartialFile != null)
    {
        if (ProgressEvent != null)
        {
            int pg = 100 - ((int)((m_unproccesed.Files.Count / (double)m_totalfiles) * 100));
            nextProgressEvent = DateTime.Now + PROGRESS_TIMESPAN;
            ProgressEvent(pg, m_lastPartialFile.fullname);
        }

        m_lastPartialFile = WritePossiblePartial(m_lastPartialFile, contentfile, signaturefile, volumesize);
    }

    while (m_unproccesed.Files.Count > 0)
    {
        // A pending partial file means this volume is full
        if (m_lastPartialFile != null)
            return false;

        if (totalSize >= volumesize)
            break;

        int next = m_sortedfilelist ? 0 : r.Next(0, m_unproccesed.Files.Count);
        string s = m_unproccesed.Files[next];
        m_unproccesed.Files.RemoveAt(next);

        if (ProgressEvent != null && DateTime.Now > nextProgressEvent)
        {
            int pg = 100 - ((int)((m_unproccesed.Files.Count / (double)m_totalfiles) * 100));
            nextProgressEvent = DateTime.Now + PROGRESS_TIMESPAN;
            ProgressEvent(pg, s);
        }

        try
        {
            if (!m_disableFiletimeCheck)
            {
                //TODO: Make this check faster somehow
                string relpath = GetRelativeName(s);
                if (m_oldSignatures.ContainsKey(relpath))
                {
                    try
                    {
                        //Reports show that the file time can be missing :(
                        DateTime lastFileWrite = m_snapshot.GetLastWriteTime(s).ToUniversalTime();

                        //Cut off as we only preserve precision in seconds after compression
                        lastFileWrite = new DateTime(lastFileWrite.Year, lastFileWrite.Month, lastFileWrite.Day, lastFileWrite.Hour, lastFileWrite.Minute, lastFileWrite.Second, DateTimeKind.Utc);

                        DateTime lastCheck;
                        if (!m_lastVerificationTime.TryGetValue(relpath, out lastCheck))
                            lastCheck = m_oldSignatures[relpath].CreateTime;

                        //Compare with the modification time of the last known check time
                        if (lastFileWrite <= lastCheck)
                        {
                            // Not modified since the last check; skip without opening it
                            m_oldSignatures.Remove(relpath);
                            m_examinedfiles++;
                            continue;
                        }
                    }
                    catch (Exception ex)
                    {
                        Logging.Log.WriteMessage(string.Format(Strings.RSyncDir.InvalidTimeStampError, s, ex.Message), XervBackup.Library.Logging.LogMessageType.Warning, ex);
                    }
                }
            }

            if (m_unproccesed.Errors.Count > 0 && m_unproccesed.IsAffectedByError(s))
                m_unproccesed.FilesWithError.Add(s);
            else
            {
                System.IO.Stream fs = null;
                try
                {
                    bool isLockedStream = false;
                    m_filesopened++;

                    //We cannot have a "using" directive here because the fs may need to survive multiple rounds
                    try
                    {
                        fs = m_snapshot.OpenRead(s);
                    }
                    catch
                    {
                        // Without a real snapshot, optionally retry with a locked-read open
                        if (m_snapshot is Snapshots.NoSnapshot && m_openfilepolicy != Options.OpenFileStrategy.Ignore)
                        {
                            try { fs = ((Snapshots.NoSnapshot)m_snapshot).OpenLockedRead(s); }
                            catch { }

                            //Rethrow original error
                            if (fs == null)
                                throw;

                            isLockedStream = true;
                        }
                        else
                            throw;
                    }

                    DateTime lastWrite = Utility.Utility.EPOCH;
                    try
                    {
                        //Record the change time after we opened (and thus locked) the file
                        lastWrite = m_snapshot.GetLastWriteTime(s).ToUniversalTime();
                        //Cut off as we only preserve precision in seconds
                        lastWrite = new DateTime(lastWrite.Year, lastWrite.Month, lastWrite.Day, lastWrite.Hour, lastWrite.Minute, lastWrite.Second, DateTimeKind.Utc);
                    }
                    catch (Exception ex)
                    {
                        Logging.Log.WriteMessage(string.Format(Strings.RSyncDir.InvalidTimeStampError, s, ex.Message), XervBackup.Library.Logging.LogMessageType.Warning, ex);
                    }

                    if (fs.Length > m_maxFileSize)
                    {
                        m_unproccesed.FilesTooLarge.Add(s);
                    }
                    else
                    {
                        //If the file is > 10mb, update the display to show the file being processed
                        if (ProgressEvent != null && fs.Length > 1024 * 1024 * 10)
                        {
                            int pg = 100 - ((int)((m_unproccesed.Files.Count / (double)m_totalfiles) * 100));
                            nextProgressEvent = DateTime.Now + PROGRESS_TIMESPAN;
                            ProgressEvent(pg, s);
                        }

                        // A null signature means the content turned out to be unchanged
                        System.IO.Stream signature = ProccessDiff(fs, s, signaturefile);
                        if (signature == null)
                        {
                            //If we had to check the file, it's timestamp was modified, so we record that the file is still unchanged
                            // so we can avoid checking the next time
                            if (!m_disableFiletimeCheck)
                                m_checkedUnchangedFiles.Add(GetRelativeName(s));

                            //TODO: If the file timestamp was changed AFTER the backup started, we will record it in this and the next backup.
                            // This can be avoided, but only happens if the file was not modified, so it will happen rarely
                        }
                        else
                        {
                            System.IO.Stream originalSignature = null;

                            //If the stream was locked, we hijack it here to ensure that the signature recorded
                            // matches the file data being read
                            if (isLockedStream)
                            {
                                if (m_openfilepolicy == Options.OpenFileStrategy.Copy)
                                {
                                    // Copy policy: snapshot the locked file into a temp file and
                                    // verify its signature still matches the one just recorded
                                    using (MemoryStream newSig = new MemoryStream())
                                    {
                                        fs.Position = 0;
                                        using (SharpRSync.ChecksumGeneratingStream ts = new SharpRSync.ChecksumGeneratingStream(newSig, fs))
                                        {
                                            newSig.Capacity = Math.Max(ts.BytesGeneratedForSignature((long)(fs.Length * FILESIZE_GROW_MARGIN_MULTIPLIER)), newSig.Capacity);
                                            fs = new Utility.TempFileStream();
                                            Utility.Utility.CopyStream(ts, fs, false);
                                        }

                                        fs.Position = 0;
                                        signature.Position = 0;
                                        newSig.Position = 0;
                                        if (!Utility.Utility.CompareStreams(signature, newSig, true))
                                            throw new Exception(string.Format(Strings.RSyncDir.FileChangedWhileReadError, s));

                                        signature.Position = 0;
                                    }
                                }
                                else
                                {
                                    //Keep a copy of the original signature for change detection
                                    originalSignature = signature;

                                    //Set up for a new round
                                    signature = new System.IO.MemoryStream();
                                    fs.Position = 0;
                                    long filelen = fs.Length;
                                    fs = new SharpRSync.ChecksumGeneratingStream(signature, fs);
                                    ((MemoryStream)signature).Capacity = Math.Max(((SharpRSync.ChecksumGeneratingStream)fs).BytesGeneratedForSignature(filelen), ((MemoryStream)signature).Capacity);
                                }
                            }

                            totalSize = AddFileToCompression(fs, s, signature, contentfile, signaturefile, volumesize, lastWrite);

                            //If this turned into a partial full entry, we must keep the file open.
                            //The file will be closed when m_lastPartialFile is disposed
                            if (m_lastPartialFile != null)
                            {
                                m_lastPartialFile.OriginalSignatureStream = originalSignature;
                                if (m_lastPartialFile.Stream == fs)
                                    fs = null;
                            }
                        }
                    }
                }
                finally
                {
                    try
                    {
                        if (fs != null)
                            fs.Dispose();
                    }
                    catch { }
                }
            }
        }
        catch (Exception ex)
        {
            // A failure on one file must not abort the volume; record and continue
            if (m_stat != null)
                m_stat.LogError(string.Format(Strings.RSyncDir.FileProcessError, s, ex.Message), ex);
            Logging.Log.WriteMessage(string.Format(Strings.RSyncDir.FileProcessError, s, ex.Message), XervBackup.Library.Logging.LogMessageType.Error, ex);
            m_unproccesed.FilesWithError.Add(s);
        }
    }

    if (m_unproccesed.Files.Count == 0 && m_lastPartialFile == null)
        return FinalizeMultiPass(signaturefile, contentfile, volumesize);
    else
        return false;
}