/// <summary>
/// Ends the sequence of creating a content/signature pair.
/// Writes the list of deleted files to the archives.
/// </summary>
/// <param name="signaturefile">The signature archive file</param>
/// <param name="contentfile">The content archive file</param>
/// <param name="volumesize">The max volume size</param>
/// <returns>True if the volume is completed, false otherwise</returns>
public bool FinalizeMultiPass(Library.Interface.ICompression signaturefile, Library.Interface.ICompression contentfile, long volumesize)
{
    if (!m_finalized)
    {
        //Finalization is only possible once every file has been processed;
        // otherwise we just report "not finalized" below
        if (m_unproccesed.Files.Count == 0)
        {
            //Any signature left over from the previous backup belongs to a file
            // that no longer exists, so it is recorded as deleted.
            //stringsize tracks the projected byte cost of the deleted-files list
            // so we can check it against the volume size before writing.
            long stringsize = 0;
            foreach (string s in m_oldSignatures.Keys)
            {
                string sourcefolder = "<unknown>";
                try
                {
                    string fullpath = GetFullPathFromRelname(s);
                    sourcefolder = GetSourceFolder(fullpath);
                    //Files under a failed source are skipped rather than
                    // falsely reported as deleted
                    if (!m_unproccesed.IsAffectedByError(fullpath))
                    {
                        m_deletedfiles.Add(s);
                        //+"\r\n" accounts for the line terminator written by WriteAllLines
                        stringsize += System.Text.Encoding.UTF8.GetByteCount(s + "\r\n");
                    }
                }
                catch (Exception ex)
                {
                    if (m_stat != null)
                        m_stat.LogError(string.Format(Strings.RSyncDir.DeletedFilenameError, s, sourcefolder), ex);
                    Logging.Log.WriteMessage(string.Format(Strings.RSyncDir.DeletedFilenameError, s, sourcefolder), XervBackup.Library.Logging.LogMessageType.Error, ex);
                    m_unproccesed.FilesWithError.Add(s);
                }
            }
            //NOTE(review): cleared before the size check below; if we return false,
            // the pending names survive in m_deletedfiles for the next volume
            m_oldSignatures.Clear();

            if (m_deletedfiles.Count > 0)
            {
                //The +100 is a safety margin
                stringsize += System.Text.Encoding.UTF8.GetByteCount(DELETED_FILES) + 100;
                if (contentfile.Size + contentfile.FlushBufferSize + stringsize > volumesize)
                    return false; //The followup cannot fit in the volume, so we make a full new volume

                //The deleted-files list goes into both archives
                signaturefile.WriteAllLines(DELETED_FILES, m_deletedfiles.ToArray());
                contentfile.WriteAllLines(DELETED_FILES, m_deletedfiles.ToArray());
                m_deletedfiles.Clear();
            }

            //We only write the USN if all files were processed
            if (m_currentUSN != null)
                using (System.IO.Stream s = signaturefile.CreateFile(USN_VALUES))
                    m_currentUSN.Save(s);

            //Only write this if all files were processed
            if (m_checkedUnchangedFiles.Count > 0)
                signaturefile.WriteAllLines(UNMODIFIED_FILES, m_checkedUnchangedFiles.ToArray());

            //Symlinks are stored as small files under SYMLINK_ROOT whose
            // content is the (platform independent) link target
            if (m_unproccesed.Symlinks.Count > 0)
            {
                foreach(KeyValuePair<string, string> kvp in m_unproccesed.Symlinks)
                {
                    string target = FilenamesToPlatformIndependant(new string[] { kvp.Value })[0];
                    string source = Path.Combine(SYMLINK_ROOT, GetRelativeName(kvp.Key));
                    byte[] targetBytes = Encoding.UTF8.GetBytes(target);
                    contentfile.WriteAllBytes(source, targetBytes);
                    signaturefile.WriteAllBytes(source, targetBytes);
                }
                m_unproccesed.Symlinks.Clear();
            }
        }

        m_finalized = true;
    }

    return m_finalized;
}
/// <summary>
/// Appends a file to the content archive, watching the content archive file size.
/// Does not record anything in either content or signature volumes.
/// Returns the partial file entry if the volume size was exceeded.
/// Returns null if the file was written entirely.
/// </summary>
/// <param name="entry">The entry that describes the partial file</param>
/// <param name="contentfile">The content archive file</param>
/// <param name="volumesize">The max allowed volumesize</param>
/// <returns>The partial file entry if the volume size was exceeded. Returns null if the file was written entirely.</returns>
private PartialFileEntry WritePossiblePartialInternal(PartialFileEntry entry, Library.Interface.ICompression contentfile, long volumesize)
{
    //Copy the source stream in 1kb chunks, re-checking the projected
    // archive size after every chunk written
    byte[] buffer = new byte[1024];
    using (System.IO.Stream target = contentfile.CreateFile(entry.relativeName, entry.LastWriteTime))
    {
        while (true)
        {
            int bytesRead = entry.Stream.Read(buffer, 0, buffer.Length);
            if (bytesRead == 0)
                break;

            target.Write(buffer, 0, bytesRead);

            //If another full chunk could push the volume past its limit,
            // hand the entry back so the caller can continue it in a new volume
            if (contentfile.Size + contentfile.FlushBufferSize + entry.ExtraSize + buffer.Length > volumesize)
                return entry;
        }
    }

    //The entire remaining file content fit in this volume
    return null;
}
/// <summary>
/// Writes the recorded signature stream into the signature archive and releases it.
/// If an original signature is present, the two signatures are compared first and
/// the comparison result is returned; otherwise the result is true.
/// </summary>
/// <param name="signatureArchive">The archive that receives the signature data</param>
/// <returns>The stream comparison result when an original signature exists, otherwise true</returns>
public bool DumpSignature(Library.Interface.ICompression signatureArchive)
{
    //Add signature AFTER content.
    //If content is present, it is restoreable, if signature is missing, file will be backed up on next run
    //If signature is present, but not content, the entire differential sequence will be unable to recover the file
    if (m_fs is SharpRSync.ChecksumGeneratingStream)
        m_fs.Flush();

    bool result = true;

    using (m_signatureStream)
    {
        if (m_originalSignatureStream != null)
        {
            //Compare the fresh signature against the original, starting both
            // streams from the beginning
            m_originalSignatureStream.Position = 0;
            m_signatureStream.Position = 0;
            result = Utility.Utility.CompareStreams(m_originalSignatureStream, m_signatureStream, true);

            //Leave the signature rewound so the copy below starts at byte zero
            m_signatureStream.Position = 0;
        }

        using (System.IO.Stream archiveStream = signatureArchive.CreateFile(this.m_signaturePath, m_lastWrite))
            Utility.Utility.CopyStream(m_signatureStream, archiveStream, true);
    }

    //The stream is disposed by the using block above; drop the reference
    m_signatureStream = null;

    return result;
}
/// <summary>
/// Creates a signature/content pair.
/// Returns true when all files are processed.
/// Returns false if there are still files to process.
/// This method will only return false if the volumesize is exceeded.
/// </summary>
/// <param name="signaturefile">The signature archive file</param>
/// <param name="contentfile">The content archive file</param>
/// <param name="volumesize">The max size of this volume</param>
/// <returns>False if there are still files to process, true if all files are processed</returns>
public bool MakeMultiPassDiff(Library.Interface.ICompression signaturefile, Library.Interface.ICompression contentfile, long volumesize)
{
    if (m_unproccesed == null)
        throw new Exception(Strings.RSyncDir.MultipassUsageError);

    //Used to pick a random next file when the file list is not sorted
    Random r = new Random();
    long totalSize = 0;

    //Insert the marker file
    contentfile.CreateFile(UTC_TIME_MARKER).Dispose();
    signaturefile.CreateFile(UTC_TIME_MARKER).Dispose();

    if (m_isfirstmultipass)
    {
        //We write these files to the very first volume
        if (m_deletedfolders.Count > 0)
        {
            signaturefile.WriteAllLines(DELETED_FOLDERS, m_deletedfolders.ToArray());
            contentfile.WriteAllLines(DELETED_FOLDERS, m_deletedfolders.ToArray());
        }

        //Added folders are stored as two parallel lists: names and
        // their timestamps as whole seconds since the epoch
        if (m_newfolders.Count > 0)
        {
            string[] folders = new string[m_newfolders.Count];
            string[] timestamps = new string[m_newfolders.Count];
            for (int i = 0; i < m_newfolders.Count; i++)
            {
                folders[i] = m_newfolders[i].Key;
                timestamps[i] = ((long)((m_newfolders[i].Value - Utility.Utility.EPOCH).TotalSeconds)).ToString();
            }
            folders = FilenamesToPlatformIndependant(folders);
            signaturefile.WriteAllLines(ADDED_FOLDERS, folders);
            signaturefile.WriteAllLines(ADDED_FOLDERS_TIMESTAMPS, timestamps);
            contentfile.WriteAllLines(ADDED_FOLDERS, folders);
            contentfile.WriteAllLines(ADDED_FOLDERS_TIMESTAMPS, timestamps);
        }

        //Updated folders use the same parallel-list layout as added folders
        if (m_updatedfolders.Count > 0)
        {
            string[] folders = new string[m_updatedfolders.Count];
            string[] timestamps = new string[m_updatedfolders.Count];
            for (int i = 0; i < m_updatedfolders.Count; i++)
            {
                folders[i] = m_updatedfolders[i].Key;
                timestamps[i] = ((long)((m_updatedfolders[i].Value - Utility.Utility.EPOCH).TotalSeconds)).ToString();
            }
            folders = FilenamesToPlatformIndependant(folders);
            signaturefile.WriteAllLines(UPDATED_FOLDERS, folders);
            signaturefile.WriteAllLines(UPDATED_FOLDERS_TIMESTAMPS, timestamps);
            contentfile.WriteAllLines(UPDATED_FOLDERS, folders);
            contentfile.WriteAllLines(UPDATED_FOLDERS_TIMESTAMPS, timestamps);
        }

        m_isfirstmultipass = false;
    }

    //Last update was a looong time ago
    DateTime nextProgressEvent = DateTime.Now.AddYears(-1);

    //A file left half-written in the previous volume is continued first
    if (m_lastPartialFile != null)
    {
        if (ProgressEvent != null)
        {
            int pg = 100 - ((int)((m_unproccesed.Files.Count / (double)m_totalfiles) * 100));
            nextProgressEvent = DateTime.Now + PROGRESS_TIMESPAN;
            ProgressEvent(pg, m_lastPartialFile.fullname);
        }
        m_lastPartialFile = WritePossiblePartial(m_lastPartialFile, contentfile, signaturefile, volumesize);
    }

    while (m_unproccesed.Files.Count > 0)
    {
        //A pending partial file means this volume is full
        if (m_lastPartialFile != null)
            return false;

        if (totalSize >= volumesize)
            break;

        int next = m_sortedfilelist ? 0 : r.Next(0, m_unproccesed.Files.Count);
        string s = m_unproccesed.Files[next];
        m_unproccesed.Files.RemoveAt(next);

        //Throttled progress reporting, at most once per PROGRESS_TIMESPAN
        if (ProgressEvent != null && DateTime.Now > nextProgressEvent)
        {
            int pg = 100 - ((int)((m_unproccesed.Files.Count / (double)m_totalfiles) * 100));
            nextProgressEvent = DateTime.Now + PROGRESS_TIMESPAN;
            ProgressEvent(pg, s);
        }

        try
        {
            //Fast path: skip files whose timestamp shows no change since the
            // last verification (or since the old signature was created)
            if (!m_disableFiletimeCheck)
            {
                //TODO: Make this check faster somehow
                string relpath = GetRelativeName(s);
                if (m_oldSignatures.ContainsKey(relpath))
                {
                    try
                    {
                        //Reports show that the file time can be missing :(
                        DateTime lastFileWrite = m_snapshot.GetLastWriteTime(s).ToUniversalTime();
                        //Cut off as we only preserve precision in seconds after compression
                        lastFileWrite = new DateTime(lastFileWrite.Year, lastFileWrite.Month, lastFileWrite.Day, lastFileWrite.Hour, lastFileWrite.Minute, lastFileWrite.Second, DateTimeKind.Utc);
                        DateTime lastCheck;
                        if (!m_lastVerificationTime.TryGetValue(relpath, out lastCheck))
                            lastCheck = m_oldSignatures[relpath].CreateTime;

                        //Compare with the modification time of the last known check time
                        if (lastFileWrite <= lastCheck)
                        {
                            m_oldSignatures.Remove(relpath);
                            m_examinedfiles++;
                            continue;
                        }
                    }
                    catch (Exception ex)
                    {
                        Logging.Log.WriteMessage(string.Format(Strings.RSyncDir.InvalidTimeStampError, s, ex.Message), XervBackup.Library.Logging.LogMessageType.Warning, ex);
                    }
                }
            }

            if (m_unproccesed.Errors.Count > 0 && m_unproccesed.IsAffectedByError(s))
                m_unproccesed.FilesWithError.Add(s);
            else
            {
                System.IO.Stream fs = null;
                try
                {
                    bool isLockedStream = false;
                    m_filesopened++;
                    //We cannot have a "using" directive here because the fs may need to survive multiple rounds
                    try
                    {
                        fs = m_snapshot.OpenRead(s);
                    }
                    catch
                    {
                        //Fall back to a locked read if there is no snapshot
                        // support and the policy allows it
                        if (m_snapshot is Snapshots.NoSnapshot && m_openfilepolicy != Options.OpenFileStrategy.Ignore)
                        {
                            try { fs = ((Snapshots.NoSnapshot)m_snapshot).OpenLockedRead(s); }
                            catch { }

                            //Rethrow original error
                            if (fs == null)
                                throw;

                            isLockedStream = true;
                        }
                        else
                            throw;
                    }

                    DateTime lastWrite = Utility.Utility.EPOCH;
                    try
                    {
                        //Record the change time after we opened (and thus locked) the file
                        lastWrite = m_snapshot.GetLastWriteTime(s).ToUniversalTime();
                        //Cut off as we only preserve precision in seconds
                        lastWrite = new DateTime(lastWrite.Year, lastWrite.Month, lastWrite.Day, lastWrite.Hour, lastWrite.Minute, lastWrite.Second, DateTimeKind.Utc);
                    }
                    catch (Exception ex)
                    {
                        Logging.Log.WriteMessage(string.Format(Strings.RSyncDir.InvalidTimeStampError, s, ex.Message), XervBackup.Library.Logging.LogMessageType.Warning, ex);
                    }

                    if (fs.Length > m_maxFileSize)
                    {
                        m_unproccesed.FilesTooLarge.Add(s);
                    }
                    else
                    {
                        //If the file is > 10mb, update the display to show the file being processed
                        if (ProgressEvent != null && fs.Length > 1024 * 1024 * 10)
                        {
                            int pg = 100 - ((int)((m_unproccesed.Files.Count / (double)m_totalfiles) * 100));
                            nextProgressEvent = DateTime.Now + PROGRESS_TIMESPAN;
                            ProgressEvent(pg, s);
                        }

                        //NOTE(review): a null signature appears to mean "content unchanged" — confirm against ProccessDiff
                        System.IO.Stream signature = ProccessDiff(fs, s, signaturefile);
                        if (signature == null)
                        {
                            //If we had to check the file, it's timestamp was modified, so we record that the file is still unchanged
                            // so we can avoid checking the next time
                            if (!m_disableFiletimeCheck)
                                m_checkedUnchangedFiles.Add(GetRelativeName(s));

                            //TODO: If the file timestamp was changed AFTER the backup started, we will record it in this and the next backup.
                            // This can be avoided, but only happens if the file was not modified, so it will happen rarely
                        }
                        else
                        {
                            System.IO.Stream originalSignature = null;

                            //If the stream was locked, we hijack it here to ensure that the signature recorded
                            // matches the file data being read
                            if (isLockedStream)
                            {
                                if (m_openfilepolicy == Options.OpenFileStrategy.Copy)
                                {
                                    //Copy strategy: re-read the file into a temp copy while
                                    // regenerating the signature, then verify the two
                                    // signatures agree, i.e. the file did not change mid-read
                                    using (MemoryStream newSig = new MemoryStream())
                                    {
                                        fs.Position = 0;
                                        using (SharpRSync.ChecksumGeneratingStream ts = new SharpRSync.ChecksumGeneratingStream(newSig, fs))
                                        {
                                            newSig.Capacity = Math.Max(ts.BytesGeneratedForSignature((long)(fs.Length * FILESIZE_GROW_MARGIN_MULTIPLIER)), newSig.Capacity);
                                            fs = new Utility.TempFileStream();
                                            Utility.Utility.CopyStream(ts, fs, false);
                                        }

                                        fs.Position = 0;
                                        signature.Position = 0;
                                        newSig.Position = 0;
                                        if (!Utility.Utility.CompareStreams(signature, newSig, true))
                                            throw new Exception(string.Format(Strings.RSyncDir.FileChangedWhileReadError, s));

                                        signature.Position = 0;
                                    }
                                }
                                else
                                {
                                    //Keep a copy of the original signature for change detection
                                    originalSignature = signature;

                                    //Set up for a new round
                                    signature = new System.IO.MemoryStream();
                                    fs.Position = 0;
                                    long filelen = fs.Length;
                                    fs = new SharpRSync.ChecksumGeneratingStream(signature, fs);
                                    ((MemoryStream)signature).Capacity = Math.Max(((SharpRSync.ChecksumGeneratingStream)fs).BytesGeneratedForSignature(filelen), ((MemoryStream)signature).Capacity);
                                }
                            }

                            totalSize = AddFileToCompression(fs, s, signature, contentfile, signaturefile, volumesize, lastWrite);

                            //If this turned into a partial full entry, we must keep the file open.
                            //The file will be closed when m_lastPartialFile is disposed
                            if (m_lastPartialFile != null)
                            {
                                m_lastPartialFile.OriginalSignatureStream = originalSignature;
                                if (m_lastPartialFile.Stream == fs)
                                    fs = null;
                            }
                        }
                    }
                }
                finally
                {
                    //Best-effort cleanup; fs is null when ownership moved to m_lastPartialFile
                    try { if (fs != null) fs.Dispose(); }
                    catch { }
                }
            }
        }
        catch (Exception ex)
        {
            if (m_stat != null)
                m_stat.LogError(string.Format(Strings.RSyncDir.FileProcessError, s, ex.Message), ex);
            Logging.Log.WriteMessage(string.Format(Strings.RSyncDir.FileProcessError, s, ex.Message), XervBackup.Library.Logging.LogMessageType.Error, ex);
            m_unproccesed.FilesWithError.Add(s);
        }
    }

    if (m_unproccesed.Files.Count == 0 && m_lastPartialFile == null)
        return FinalizeMultiPass(signaturefile, contentfile, volumesize);
    else
        return false;
}