Example #1
        public static Task Run(Snapshots.ISnapshotService snapshot, Options options, BackupStatsCollector stats, BackupDatabase database)
        {
            return(AutomationExtensions.RunTask(
                       new
            {
                Input = Channels.ProcessedFiles.ForRead,
                Output = Channels.AcceptedChangedFile.ForWrite
            },

                       async self =>
            {
                var EMPTY_METADATA = Utility.WrapMetadata(new Dictionary <string, string>(), options);
                var blocksize = options.Blocksize;

                while (true)
                {
                    var e = await self.Input.ReadAsync();

                    long filestatsize = -1;
                    try
                    {
                        filestatsize = snapshot.GetFileSize(e.Path);
                    }
                    catch (Exception ex)
                    {
                        Logging.Log.WriteExplicitMessage(FILELOGTAG, "FailedToReadSize", ex, "Failed to read size of file: {0}", e.Path);
                    }

                    await stats.AddExaminedFile(filestatsize);

                    e.MetaHashAndSize = options.StoreMetadata ? Utility.WrapMetadata(await MetadataGenerator.GenerateMetadataAsync(e.Path, e.Attributes, options, snapshot), options) : EMPTY_METADATA;

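                    // Determine whether the file may have changed since the last backup by comparing
                    // the timestamp, the file size and (unless disabled) the metadata hash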
                    var timestampChanged = e.LastWrite != e.OldModified || e.LastWrite.Ticks == 0 || e.OldModified.Ticks == 0;
                    var filesizeChanged = filestatsize < 0 || e.LastFileSize < 0 || filestatsize != e.LastFileSize;
                    var tooLargeFile = options.SkipFilesLargerThan != long.MaxValue && options.SkipFilesLargerThan != 0 && filestatsize >= 0 && filestatsize > options.SkipFilesLargerThan;
                    e.MetadataChanged = !options.CheckFiletimeOnly && !options.SkipMetadata && (e.MetaHashAndSize.Blob.Length != e.OldMetaSize || e.MetaHashAndSize.FileHash != e.OldMetaHash);

                    if ((e.OldId < 0 || options.DisableFiletimeCheck || timestampChanged || filesizeChanged || e.MetadataChanged) && !tooLargeFile)
                    {
                        Logging.Log.WriteVerboseMessage(FILELOGTAG, "CheckFileForChanges", "Checking file for changes {0}, new: {1}, timestamp changed: {2}, size changed: {3}, metadata changed: {4}, {5} vs {6}", e.Path, e.OldId <= 0, timestampChanged, filesizeChanged, e.MetadataChanged, e.LastWrite, e.OldModified);
                        await self.Output.WriteAsync(e);
                    }
                    else
                    {
                        if (tooLargeFile)
                        {
                            Logging.Log.WriteVerboseMessage(FILELOGTAG, "SkipCheckTooLarge", "Skipped checking file because its size exceeds the limit: {0}", e.Path);
                        }
                        else
                        {
                            Logging.Log.WriteVerboseMessage(FILELOGTAG, "SkipCheckNoTimestampChange", "Skipped checking file because the timestamp was not updated: {0}", e.Path);
                        }

                        await database.AddUnmodifiedAsync(e.OldId, e.LastWrite);
                    }
                }
            }));
        }
Example #2
        public static Task Run(Snapshots.ISnapshotService snapshot, BackupResults result, Options options, IFilter sourcefilter, IFilter filter, Common.ITaskReader taskreader, System.Threading.CancellationToken token)
        {
            // Make sure we create the enumeration process in a separate scope,
            // but keep the log channel from the parent scope
            using (Logging.Log.StartIsolatingScope())
                using (new IsolatedChannelScope())
                {
                    var enumeratorTask = Backup.FileEnumerationProcess.Run(snapshot, options.FileAttributeFilter, sourcefilter, filter, options.SymlinkPolicy, options.HardlinkPolicy, options.ExcludeEmptyFolders, options.IgnoreFilenames, options.ChangedFilelist, taskreader);
                    var counterTask    = AutomationExtensions.RunTask(new
                    {
                        Input = Backup.Channels.SourcePaths.ForRead
                    },

                                                                      async self =>
                    {
                        var count = 0L;
                        var size  = 0L;

                        try
                        {
                            while (await taskreader.ProgressAsync && !token.IsCancellationRequested)
                            {
                                var path = await self.Input.ReadAsync();

                                count++;

                                try
                                {
                                    size += snapshot.GetFileSize(path);
                                }
                                catch
                                {
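                                    // The file size could not be read; leave this file out of the running size estimate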
                                }

                                result.OperationProgressUpdater.UpdatefileCount(count, size, false);
                            }
                        }
                        finally
                        {
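                            // Always report the totals gathered so far, even if enumeration stops early or fails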
                            result.OperationProgressUpdater.UpdatefileCount(count, size, true);
                        }
                    });

                    return(Task.WhenAll(enumeratorTask, counterTask));
                }
        }
Example #3
        /// <summary>
        /// Performs the bulk of work by starting all relevant processes
        /// </summary>
        private static async Task RunMainOperation(Snapshots.ISnapshotService snapshot, Backup.BackupDatabase database, Backup.BackupStatsCollector stats, Options options, IFilter sourcefilter, IFilter filter, BackupResults result, Common.ITaskReader taskreader, long lastfilesetid)
        {
            using (new Logging.Timer(LOGTAG, "BackupMainOperation", "BackupMainOperation"))
            {
                // Make sure the CompressionHints table is initialized, otherwise all workers will initialize it
                var tb = options.CompressionHints.Count;
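                // The value itself is unused; reading the property is what forces the initialization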

                Task all;
                using (new ChannelScope())
                {
                    all = Task.WhenAll(
                        new []
                    {
                        Backup.DataBlockProcessor.Run(database, options, taskreader),
                        Backup.FileBlockProcessor.Run(snapshot, options, database, stats, taskreader),
                        Backup.StreamBlockSplitter.Run(options, database, taskreader),
                        Backup.FileEnumerationProcess.Run(snapshot, options.FileAttributeFilter, sourcefilter, filter, options.SymlinkPolicy, options.HardlinkPolicy, options.ExcludeEmptyFolders, options.IgnoreFilenames, options.ChangedFilelist, taskreader),
                        Backup.FilePreFilterProcess.Run(snapshot, options, stats, database),
                        Backup.MetadataPreProcess.Run(snapshot, options, database, lastfilesetid),
                        Backup.SpillCollectorProcess.Run(options, database, taskreader),
                        Backup.ProgressHandler.Run(result)
                    }
                        // Spawn additional block hashers
                        .Union(
                            Enumerable.Range(0, options.ConcurrencyBlockHashers - 1).Select(x => Backup.StreamBlockSplitter.Run(options, database, taskreader))
                            )
                        // Spawn additional compressors
                        .Union(
                            Enumerable.Range(0, options.ConcurrencyCompressors - 1).Select(x => Backup.DataBlockProcessor.Run(database, options, taskreader))
                            )
                        );
                }

                await all;

                if (options.ChangedFilelist != null && options.ChangedFilelist.Length >= 1)
                {
                    await database.AppendFilesFromPreviousSetAsync(options.DeletedFilelist);
                }

                result.OperationProgressUpdater.UpdatefileCount(result.ExaminedFiles, result.SizeOfExaminedFiles, true);
            }
        }
Example #4
            public FilterHandler(Snapshots.ISnapshotService snapshot, FileAttributes attributeFilter, Duplicati.Library.Utility.IFilter sourcefilter, Duplicati.Library.Utility.IFilter filter, Options.SymlinkStrategy symlinkPolicy, Options.HardlinkStrategy hardlinkPolicy, ILogWriter logWriter)
            {
                m_snapshot = snapshot;
                m_attributeFilter = attributeFilter;
                m_sourcefilter = sourcefilter;
                m_emitfilter = filter;
                m_symlinkPolicy = symlinkPolicy;
                m_hardlinkPolicy = hardlinkPolicy;
                m_logWriter = logWriter;
                m_hardlinkmap = new Dictionary<string, string>();
                m_mixinqueue = new Queue<string>();

                bool includes;
                bool excludes;
                Library.Utility.FilterExpression.AnalyzeFilters(filter, out includes, out excludes);
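                // When the filter only contains include rules, also match folder paths
                // (anything ending in a directory separator) so enumeration can descend into them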
                if (includes && !excludes)
                {
                    m_enumeratefilter = Library.Utility.FilterExpression.Combine(filter, new Duplicati.Library.Utility.FilterExpression("*" + System.IO.Path.DirectorySeparatorChar, true));
                }
                else
                {
                    m_enumeratefilter = m_emitfilter;
                }

            }
Example #5
        public static Task Run(IEnumerable <string> sources, Snapshots.ISnapshotService snapshot, UsnJournalService journalService, FileAttributes fileAttributes, Duplicati.Library.Utility.IFilter sourcefilter, Duplicati.Library.Utility.IFilter emitfilter, Options.SymlinkStrategy symlinkPolicy, Options.HardlinkStrategy hardlinkPolicy, bool excludeemptyfolders, string[] ignorenames, string[] changedfilelist, ITaskReader taskreader)
        {
            return(AutomationExtensions.RunTask(
                       new
            {
                Output = Backup.Channels.SourcePaths.ForWrite
            },

                       async self =>
            {
                var hardlinkmap = new Dictionary <string, string>();
                var mixinqueue = new Queue <string>();
                Duplicati.Library.Utility.IFilter enumeratefilter = emitfilter;

                bool includes;
                bool excludes;
                Library.Utility.FilterExpression.AnalyzeFilters(emitfilter, out includes, out excludes);
                if (includes && !excludes)
                {
                    enumeratefilter = Library.Utility.FilterExpression.Combine(emitfilter, new Duplicati.Library.Utility.FilterExpression("*" + System.IO.Path.DirectorySeparatorChar, true));
                }

                // Simplify checking for an empty list
                if (ignorenames != null && ignorenames.Length == 0)
                {
                    ignorenames = null;
                }

                // If we have a specific list, use that instead of enumerating the filesystem
                IEnumerable <string> worklist;
                if (changedfilelist != null && changedfilelist.Length > 0)
                {
                    worklist = changedfilelist.Where(x =>
                    {
                        var fa = FileAttributes.Normal;
                        try
                        {
                            fa = snapshot.GetAttributes(x);
                        }
                        catch
                        {
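                            // Attributes could not be read; fall back to FileAttributes.Normal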
                        }

                        return AttributeFilter(x, fa, snapshot, sourcefilter, hardlinkPolicy, symlinkPolicy, hardlinkmap, fileAttributes, enumeratefilter, ignorenames, mixinqueue);
                    });
                }
                else
                {
                    Library.Utility.Utility.EnumerationFilterDelegate attributeFilter = (root, path, attr) =>
                                                                                        AttributeFilter(path, attr, snapshot, sourcefilter, hardlinkPolicy, symlinkPolicy, hardlinkmap, fileAttributes, enumeratefilter, ignorenames, mixinqueue);

                    if (journalService != null)
                    {
                        // Filter sources using the USN journal to obtain a subset of files/folders that may have been modified
                        sources = journalService.GetModifiedSources(attributeFilter);
                    }

                    worklist = snapshot.EnumerateFilesAndFolders(sources, attributeFilter, (rootpath, errorpath, ex) =>
                    {
                        Logging.Log.WriteWarningMessage(FILTER_LOGTAG, "FileAccessError", ex, "Error reported while accessing file: {0}", errorpath);
                    });
                }

                var source = ExpandWorkList(worklist, mixinqueue, emitfilter, enumeratefilter);
                if (excludeemptyfolders)
                {
                    source = ExcludeEmptyFolders(source);
                }

                // Process each path, and dequeue the mixins with symlinks as we go
                foreach (var s in source)
                {
                    if (!await taskreader.ProgressAsync)
                    {
                        return;
                    }

                    await self.Output.WriteAsync(s);
                }
            }));
        }
Example #6
        /// <summary>
        /// Plugin filter for enumerating a list of files.
        /// </summary>
        /// <returns>True if the path should be returned, false otherwise.</returns>
        /// <param name="path">The current path.</param>
        /// <param name="attributes">The file or folder attributes.</param>
        private static bool AttributeFilter(string path, FileAttributes attributes, Snapshots.ISnapshotService snapshot, Library.Utility.IFilter sourcefilter, Options.HardlinkStrategy hardlinkPolicy, Options.SymlinkStrategy symlinkPolicy, Dictionary <string, string> hardlinkmap, FileAttributes fileAttributes, Duplicati.Library.Utility.IFilter enumeratefilter, string[] ignorenames, Queue <string> mixinqueue)
        {
            // Step 1, exclude block devices
            try
            {
                if (snapshot.IsBlockDevice(path))
                {
                    Logging.Log.WriteVerboseMessage(FILTER_LOGTAG, "ExcludingBlockDevice", "Excluding block device: {0}", path);
                    return(false);
                }
            }
            catch (Exception ex)
            {
                Logging.Log.WriteWarningMessage(FILTER_LOGTAG, "PathProcessingError", ex, "Failed to process path: {0}", path);
                return(false);
            }

            // Check if we explicitly include this entry
            Duplicati.Library.Utility.IFilter sourcematch;
            bool sourcematches;

            if (sourcefilter.Matches(path, out sourcematches, out sourcematch) && sourcematches)
            {
                Logging.Log.WriteVerboseMessage(FILTER_LOGTAG, "IncludingSourcePath", "Including source path: {0}", path);
                return(true);
            }

            // If we have a hardlink strategy, obey it
            if (hardlinkPolicy != Options.HardlinkStrategy.All)
            {
                try
                {
                    var id = snapshot.HardlinkTargetID(path);
                    if (id != null)
                    {
                        if (hardlinkPolicy == Options.HardlinkStrategy.None)
                        {
                            Logging.Log.WriteVerboseMessage(FILTER_LOGTAG, "ExcludingHardlinkByPolicy", "Excluding hardlink: {0} ({1})", path, id);
                            return(false);
                        }
                        else if (hardlinkPolicy == Options.HardlinkStrategy.First)
                        {
                            string prevPath;
                            if (hardlinkmap.TryGetValue(id, out prevPath))
                            {
                                Logging.Log.WriteVerboseMessage(FILTER_LOGTAG, "ExcludingDuplicateHardlink", "Excluding hardlink ({1}) for: {0}, previous hardlink: {2}", path, id, prevPath);
                                return(false);
                            }
                            else
                            {
                                hardlinkmap.Add(id, path);
                            }
                        }
                    }
                }
                catch (Exception ex)
                {
                    Logging.Log.WriteWarningMessage(FILTER_LOGTAG, "PathProcessingError", ex, "Failed to process path: {0}", path);
                    return(false);
                }
            }

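            // If ignore-marker filenames are configured, exclude any folder that contains one of them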
            if (ignorenames != null && (attributes & FileAttributes.Directory) != 0)
            {
                try
                {
                    foreach (var n in ignorenames)
                    {
                        var ignorepath = SnapshotUtility.SystemIO.PathCombine(path, n);
                        if (snapshot.FileExists(ignorepath))
                        {
                            Logging.Log.WriteVerboseMessage(FILTER_LOGTAG, "ExcludingPathDueToIgnoreFile", "Excluding path because ignore file was found: {0}", ignorepath);
                            return(false);
                        }
                    }
                }
                catch (Exception ex)
                {
                    Logging.Log.WriteWarningMessage(FILTER_LOGTAG, "PathProcessingError", ex, "Failed to process path: {0}", path);
                }
            }

            // If we exclude files based on attributes, filter that
            if ((fileAttributes & attributes) != 0)
            {
                Logging.Log.WriteVerboseMessage(FILTER_LOGTAG, "ExcludingPathFromAttributes", "Excluding path due to attribute filter: {0}", path);
                return(false);
            }

            // Then check that the path is not explicitly excluded by a filter
            Library.Utility.IFilter match;
            var filtermatch = false;

            if (!Library.Utility.FilterExpression.Matches(enumeratefilter, path, out match))
            {
                Logging.Log.WriteVerboseMessage(FILTER_LOGTAG, "ExcludingPathFromFilter", "Excluding path due to filter: {0} => {1}", path, match == null ? "null" : match.ToString());
                return(false);
            }
            else if (match != null)
            {
                filtermatch = true;
                Logging.Log.WriteVerboseMessage(FILTER_LOGTAG, "IncludingPathFromFilter", "Including path due to filter: {0} => {1}", path, match.ToString());
            }

            // If the file is a symlink, apply special handling
            var    isSymlink     = snapshot.IsSymlink(path, attributes);
            string symlinkTarget = null;

            if (isSymlink)
            {
                try { symlinkTarget = snapshot.GetSymlinkTarget(path); }
                catch (Exception ex) { Logging.Log.WriteExplicitMessage(FILTER_LOGTAG, "SymlinkTargetReadError", ex, "Failed to read symlink target for path: {0}", path); }
            }

            if (isSymlink)
            {
                if (!string.IsNullOrWhiteSpace(symlinkTarget))
                {
                    if (symlinkPolicy == Options.SymlinkStrategy.Ignore)
                    {
                        Logging.Log.WriteVerboseMessage(FILTER_LOGTAG, "ExcludeSymlink", "Excluding symlink: {0}", path);
                        return(false);
                    }

                    if (symlinkPolicy == Options.SymlinkStrategy.Store)
                    {
                        Logging.Log.WriteVerboseMessage(FILTER_LOGTAG, "StoreSymlink", "Storing symlink: {0}", path);

                        // We return false because we do not want to recurse into the path,
                        // but we add the symlink to the mixin so we process the symlink itself
                        mixinqueue.Enqueue(path);
                        return(false);
                    }
                }
                else
                {
                    Logging.Log.WriteVerboseMessage(FILTER_LOGTAG, "FollowingEmptySymlink", "Treating empty symlink as regular path {0}", path);
                }
            }

            if (!filtermatch)
            {
                Logging.Log.WriteVerboseMessage(FILTER_LOGTAG, "IncludingPath", "Including path as no filters matched: {0}", path);
            }

            // All the way through, yes!
            return(true);
        }
Example #7
        public static Task Run(Snapshots.ISnapshotService snapshot, Options options, BackupDatabase database, long lastfilesetid, CancellationToken token)
        {
            return(AutomationExtensions.RunTask(new
            {
                Input = Backup.Channels.SourcePaths.ForRead,
                StreamBlockChannel = Channels.StreamBlock.ForWrite,
                Output = Backup.Channels.ProcessedFiles.ForWrite,
            },

                                                async self =>
            {
                var emptymetadata = Utility.WrapMetadata(new Dictionary <string, string>(), options);
                var prevprefix = new KeyValuePair <string, long>(null, -1);

                var CHECKFILETIMEONLY = options.CheckFiletimeOnly;
                var DISABLEFILETIMECHECK = options.DisableFiletimeCheck;

                while (true)
                {
                    var path = await self.Input.ReadAsync();

                    var lastwrite = new DateTime(0, DateTimeKind.Utc);
                    var attributes = default(FileAttributes);
                    try
                    {
                        lastwrite = snapshot.GetLastWriteTimeUtc(path);
                    }
                    catch (Exception ex)
                    {
                        Logging.Log.WriteWarningMessage(FILELOGTAG, "TimestampReadFailed", ex, "Failed to read timestamp on \"{0}\"", path);
                    }

                    try
                    {
                        attributes = snapshot.GetAttributes(path);
                    }
                    catch (Exception ex)
                    {
                        Logging.Log.WriteVerboseMessage(FILELOGTAG, "FailedAttributeRead", "Failed to read attributes from {0}: {1}", path, ex.Message);
                    }

                    // If we only have metadata, stop here
                    if (await ProcessMetadata(path, attributes, lastwrite, options, snapshot, emptymetadata, database, self.StreamBlockChannel).ConfigureAwait(false))
                    {
                        try
                        {
                            var split = Database.LocalDatabase.SplitIntoPrefixAndName(path);

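                            // Cache the most recent path prefix so consecutive files in the same
                            // folder do not need an extra database lookup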
                            long prefixid;
                            if (string.Equals(prevprefix.Key, split.Key, StringComparison.Ordinal))
                            {
                                prefixid = prevprefix.Value;
                            }
                            else
                            {
                                prefixid = await database.GetOrCreatePathPrefix(split.Key);
                                prevprefix = new KeyValuePair <string, long>(split.Key, prefixid);
                            }

                            if (CHECKFILETIMEONLY || DISABLEFILETIMECHECK)
                            {
                                var tmp = await database.GetFileLastModifiedAsync(prefixid, split.Value, lastfilesetid, false);
                                await self.Output.WriteAsync(new FileEntry
                                {
                                    OldId = tmp.Item1,
                                    Path = path,
                                    PathPrefixID = prefixid,
                                    Filename = split.Value,
                                    Attributes = attributes,
                                    LastWrite = lastwrite,
                                    OldModified = tmp.Item2,
                                    LastFileSize = tmp.Item3,
                                    OldMetaHash = null,
                                    OldMetaSize = -1
                                });
                            }
                            else
                            {
                                var res = await database.GetFileEntryAsync(prefixid, split.Value, lastfilesetid);
                                await self.Output.WriteAsync(new FileEntry
                                {
                                    OldId = res == null ? -1 : res.id,
                                    Path = path,
                                    PathPrefixID = prefixid,
                                    Filename = split.Value,
                                    Attributes = attributes,
                                    LastWrite = lastwrite,
                                    OldModified = res == null ? new DateTime(0) : res.modified,
                                    LastFileSize = res == null ? -1 : res.filesize,
                                    OldMetaHash = res == null ? null : res.metahash,
                                    OldMetaSize = res == null ? -1 : res.metasize
                                });
                            }
                        }
                        catch (Exception ex)
                        {
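                            // Failures caused by a retired channel or a cancellation are expected
                            // during shutdown and are not logged as warnings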
                            if (ex.IsRetiredException() || token.IsCancellationRequested)
                            {
                                continue;
                            }
                            Logging.Log.WriteWarningMessage(FILELOGTAG, "ProcessingMetadataFailed", ex,
                                                            "Failed to process entry, path: {0}", path);
                        }
                    }
                }
            }));
        }
Example #8
        /// <summary>
        /// Processes the metadata for the given path.
        /// </summary>
        /// <returns><c>true</c> if the path should be submitted for further analysis, <c>false</c> if there is nothing else to do</returns>
        private static async Task <bool> ProcessMetadata(string path, FileAttributes attributes, DateTime lastwrite, Options options, Snapshots.ISnapshotService snapshot, IMetahash emptymetadata, BackupDatabase database, IWriteChannel <StreamBlock> streamblockchannel)
        {
            if (snapshot.IsSymlink(path, attributes))
            {
                // Not all reparse points are symlinks.
                // For example, on Windows 10 Fall Creators Update, the OneDrive folder (and all subfolders)
                // are reparse points, which allows the folder to hook into the OneDrive service and download things on-demand.
                // If we can't find a symlink target for the current path, we won't treat it as a symlink.
                string symlinkTarget = snapshot.GetSymlinkTarget(path);
                if (!string.IsNullOrWhiteSpace(symlinkTarget))
                {
                    if (options.SymlinkPolicy == Options.SymlinkStrategy.Ignore)
                    {
                        Logging.Log.WriteVerboseMessage(FILELOGTAG, "IgnoreSymlink", "Ignoring symlink {0}", path);
                        return(false);
                    }

                    if (options.SymlinkPolicy == Options.SymlinkStrategy.Store)
                    {
                        var metadata = MetadataGenerator.GenerateMetadata(path, attributes, options, snapshot);

                        if (!metadata.ContainsKey("CoreSymlinkTarget"))
                        {
                            metadata["CoreSymlinkTarget"] = symlinkTarget;
                        }

                        var metahash = Utility.WrapMetadata(metadata, options);
                        await AddSymlinkToOutputAsync(path, DateTime.UtcNow, metahash, database, streamblockchannel).ConfigureAwait(false);

                        Logging.Log.WriteVerboseMessage(FILELOGTAG, "StoreSymlink", "Stored symlink {0}", path);
                        // Don't process further
                        return(false);
                    }
                }
                else
                {
                    Logging.Log.WriteVerboseMessage(FILELOGTAG, "FollowingEmptySymlink", "Treating empty symlink as regular path {0}", path);
                }
            }

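            // Directories carry only metadata; record the folder and stop further processing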
            if ((attributes & FileAttributes.Directory) == FileAttributes.Directory)
            {
                IMetahash metahash;

                if (!options.SkipMetadata)
                {
                    metahash = Utility.WrapMetadata(MetadataGenerator.GenerateMetadata(path, attributes, options, snapshot), options);
                }
                else
                {
                    metahash = emptymetadata;
                }

                Logging.Log.WriteVerboseMessage(FILELOGTAG, "AddDirectory", "Adding directory {0}", path);
                await AddFolderToOutputAsync(path, lastwrite, metahash, database, streamblockchannel).ConfigureAwait(false);

                return(false);
            }

            // Regular file, keep going
            return(true);
        }
Example #9
        public static Task Run(Snapshots.ISnapshotService snapshot, Options options, BackupDatabase database, long lastfilesetid)
        {
            return(AutomationExtensions.RunTask(new
            {
                Input = Backup.Channels.SourcePaths.ForRead,
                StreamBlockChannel = Channels.StreamBlock.ForWrite,
                Output = Backup.Channels.ProcessedFiles.ForWrite,
            },

                                                async self =>
            {
                var emptymetadata = Utility.WrapMetadata(new Dictionary <string, string>(), options);

                var CHECKFILETIMEONLY = options.CheckFiletimeOnly;
                var DISABLEFILETIMECHECK = options.DisableFiletimeCheck;

                while (true)
                {
                    var path = await self.Input.ReadAsync();

                    var lastwrite = new DateTime(0, DateTimeKind.Utc);
                    var attributes = default(FileAttributes);
                    try
                    {
                        lastwrite = snapshot.GetLastWriteTimeUtc(path);
                    }
                    catch (Exception ex)
                    {
                        Logging.Log.WriteWarningMessage(FILELOGTAG, "TimestampReadFailed", ex, "Failed to read timestamp on \"{0}\"", path);
                    }

                    try
                    {
                        attributes = snapshot.GetAttributes(path);
                    }
                    catch (Exception ex)
                    {
                        Logging.Log.WriteVerboseMessage(FILELOGTAG, "FailedAttributeRead", "Failed to read attributes from {0}: {1}", path, ex.Message);
                    }

                    // If we only have metadata, stop here
                    if (await ProcessMetadata(path, attributes, lastwrite, options, snapshot, emptymetadata, database, self.StreamBlockChannel).ConfigureAwait(false))
                    {
                        try
                        {
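                            // With timestamp-only (or disabled) checks, only the previous modification
                            // data is needed; otherwise the full previous file entry is fetched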
                            if (CHECKFILETIMEONLY || DISABLEFILETIMECHECK)
                            {
                                var tmp = await database.GetFileLastModifiedAsync(path, lastfilesetid, false);
                                await self.Output.WriteAsync(new FileEntry()
                                {
                                    OldId = tmp.Item1,
                                    Path = path,
                                    Attributes = attributes,
                                    LastWrite = lastwrite,
                                    OldModified = tmp.Item2,
                                    LastFileSize = tmp.Item3,
                                    OldMetaHash = null,
                                    OldMetaSize = -1
                                });
                            }
                            else
                            {
                                var res = await database.GetFileEntryAsync(path, lastfilesetid);
                                await self.Output.WriteAsync(new FileEntry()
                                {
                                    OldId = res == null ? -1 : res.id,
                                    Path = path,
                                    Attributes = attributes,
                                    LastWrite = lastwrite,
                                    OldModified = res == null ? new DateTime(0) : res.modified,
                                    LastFileSize = res == null ? -1 : res.filesize,
                                    OldMetaHash = res == null ? null : res.metahash,
                                    OldMetaSize = res == null ? -1 : res.metasize
                                });
                            }
                        }
                        catch (Exception ex)
                        {
                            Logging.Log.WriteWarningMessage(FILELOGTAG, "ProcessingMetadataFailed", ex, "Failed to process entry, path: {0}", path);
                        }
                    }
                }
            }));
        }
Example #10
        public void Run(string[] sources, Library.Utility.IFilter filter)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Begin);

            using(m_database = new LocalBackupDatabase(m_options.Dbpath, m_options))
            {
                m_result.SetDatabase(m_database);
                m_result.Dryrun = m_options.Dryrun;

                Utility.VerifyParameters(m_database, m_options);
                m_database.VerifyConsistency(null);
                // If there is no filter, we set an empty filter to simplify the code
                // If there is a filter, we make sure that the sources are included
                m_filter = filter ?? new Library.Utility.FilterExpression();
                m_sourceFilter = new Library.Utility.FilterExpression(sources, true);

                var lastVolumeSize = -1L;
                m_backendLogFlushTimer = DateTime.Now.Add(FLUSH_TIMESPAN);
                System.Threading.Thread parallelScanner = null;

                try
                {
                    m_snapshot = GetSnapshot(sources, m_options, m_result);

                    // Start parallel scan
                    if (m_options.ChangedFilelist == null || m_options.ChangedFilelist.Length < 1)
                    {
                        parallelScanner = new System.Threading.Thread(CountFilesThread) {
                            Name = "Read ahead file counter",
                            IsBackground = true
                        };
                        parallelScanner.Start();
                    }

                    using(m_backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
                    using(m_filesetvolume = new FilesetVolumeWriter(m_options, m_database.OperationTimestamp))
                    {
                        var incompleteFilesets = m_database.GetIncompleteFilesets(null).OrderBy(x => x.Value).ToArray();
                        if (incompleteFilesets.Length != 0)
                        {
                            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreviousBackupFinalize);
                            m_result.AddMessage("Uploading filelist from previous interrupted backup");
                            using(var trn = m_database.BeginTransaction())
                            {
                                var incompleteSet = incompleteFilesets.Last();
                                var badIds = from n in incompleteFilesets select n.Key;

                                var prevs = (from n in m_database.FilesetTimes
                                            where
                                                n.Key < incompleteSet.Key
                                                &&
                                                !badIds.Contains(n.Key)
                                            orderby n.Key
                                            select n.Key).ToArray();

                                var prevId = prevs.Length == 0 ? -1 : prevs.Last();

                                FilesetVolumeWriter fsw = null;
                                try
                                {
                                    var s = 1;
                                    var fileTime = incompleteSet.Value + TimeSpan.FromSeconds(s);
                                    var oldFilesetID = incompleteSet.Key;

                                    // Probe for an unused filename
                                    while (s < 60)
                                    {
                                        var id = m_database.GetRemoteVolumeID(VolumeBase.GenerateFilename(RemoteVolumeType.Files, m_options, null, fileTime));
                                        if (id < 0)
                                            break;

                                        fileTime = incompleteSet.Value + TimeSpan.FromSeconds(++s);
                                    }

                                    fsw = new FilesetVolumeWriter(m_options, fileTime);
                                    fsw.VolumeID = m_database.RegisterRemoteVolume(fsw.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary, m_transaction);
                                    var newFilesetID = m_database.CreateFileset(fsw.VolumeID, fileTime, trn);
                                    m_database.LinkFilesetToVolume(newFilesetID, fsw.VolumeID, trn);
                                    m_database.AppendFilesFromPreviousSet(trn, null, newFilesetID, prevId, fileTime);

                                    m_database.WriteFileset(fsw, trn, newFilesetID);

                                    if (m_options.Dryrun)
                                    {
                                        m_result.AddDryrunMessage(string.Format("Would upload fileset: {0}, size: {1}", fsw.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(fsw.LocalFilename).Length)));
                                    }
                                    else
                                    {
                                        m_database.UpdateRemoteVolume(fsw.RemoteFilename, RemoteVolumeState.Uploading, -1, null, trn);

                                        using(new Logging.Timer("CommitUpdateFilelistVolume"))
                                            trn.Commit();

                                        m_backend.Put(fsw);
                                        fsw = null;
                                    }
                                }
                                finally
                                {
                                    if (fsw != null)
                                        try { fsw.Dispose(); }
                                        catch { fsw = null; }
                                }
                            }
                        }

                        if (!m_options.NoBackendverification)
                        {
                            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreBackupVerify);
                            using(new Logging.Timer("PreBackupVerify"))
                            {
                                try
                                {
                                    FilelistProcessor.VerifyRemoteList(m_backend, m_options, m_database, m_result.BackendWriter);
                                }
                                catch (Exception ex)
                                {
                                    if (m_options.AutoCleanup)
                                    {
                                        m_result.AddWarning("Backend verification failed, attempting automatic cleanup", ex);
                                        m_result.RepairResults = new RepairResults(m_result);
                                        new RepairHandler(m_backend.BackendUrl, m_options, (RepairResults)m_result.RepairResults).Run();

                                        m_result.AddMessage("Backend cleanup finished, retrying verification");
                                        FilelistProcessor.VerifyRemoteList(m_backend, m_options, m_database, m_result.BackendWriter);
                                    }
                                    else
                                        throw;
                                }
                            }
                        }

                        m_database.BuildLookupTable(m_options);
                        m_transaction = m_database.BeginTransaction();

                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_ProcessingFiles);
                        var filesetvolumeid = m_database.RegisterRemoteVolume(m_filesetvolume.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary, m_transaction);
                        m_database.CreateFileset(filesetvolumeid, VolumeBase.ParseFilename(m_filesetvolume.RemoteFilename).Time, m_transaction);

                        m_blockvolume = new BlockVolumeWriter(m_options);
                        m_blockvolume.VolumeID = m_database.RegisterRemoteVolume(m_blockvolume.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, m_transaction);

                        if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
                        {
                            m_indexvolume = new IndexVolumeWriter(m_options);
                            m_indexvolume.VolumeID = m_database.RegisterRemoteVolume(m_indexvolume.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, m_transaction);
                        }

                        var filterhandler = new FilterHandler(m_snapshot, m_attributeFilter, m_sourceFilter, m_filter, m_symlinkPolicy, m_options.HardlinkPolicy, m_result);

                        using(new Logging.Timer("BackupMainOperation"))
                        {
                            if (m_options.ChangedFilelist != null && m_options.ChangedFilelist.Length >= 1)
                            {
                                m_result.AddVerboseMessage("Processing supplied change list instead of enumerating filesystem");
                                m_result.OperationProgressUpdater.UpdatefileCount(m_options.ChangedFilelist.Length, 0, true);

                                foreach(var p in m_options.ChangedFilelist)
                                {
                                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                    {
                                        m_result.AddMessage("Stopping backup operation on request");
                                        break;
                                    }

                                    FileAttributes fa = new FileAttributes();
                                    try
                                    {
                                        fa = m_snapshot.GetAttributes(p);
                                    }
                                    catch (Exception ex)
                                    {
                                        m_result.AddWarning(string.Format("Failed to read attributes: {0}, message: {1}", p, ex.Message), ex);
                                    }

                                    if (filterhandler.AttributeFilter(null, p, fa))
                                    {
                                        try
                                        {
                                            this.HandleFilesystemEntry(p, fa);
                                        }
                                        catch (Exception ex)
                                        {
                                            m_result.AddWarning(string.Format("Failed to process element: {0}, message: {1}", p, ex.Message), ex);
                                        }
                                    }
                                }

                                m_database.AppendFilesFromPreviousSet(m_transaction, m_options.DeletedFilelist);
                            }
                            else
                            {
                                foreach(var path in m_snapshot.EnumerateFilesAndFolders(filterhandler.AttributeFilter))
                                {
                                    if (m_result.TaskControlRendevouz() == TaskControlState.Stop)
                                    {
                                        m_result.AddMessage("Stopping backup operation on request");
                                        break;
                                    }

                                    this.HandleFilesystemEntry(path, m_snapshot.GetAttributes(path));
                                }

                            }

                            //If the scanner is still running for some reason, make sure we kill it now
                            if (parallelScanner != null && parallelScanner.IsAlive)
                                parallelScanner.Abort();

                            // We no longer need the snapshot to be active
                            try { m_snapshot.Dispose(); }
                            finally { m_snapshot = null; }

                            m_result.OperationProgressUpdater.UpdatefileCount(m_result.ExaminedFiles, m_result.SizeOfExaminedFiles, true);
                        }

                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Finalize);
                        using(new Logging.Timer("FinalizeRemoteVolumes"))
                        {
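                            // Only upload the block volume if it contains data; an empty volume
                            // (and its index volume) is removed from the database again below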
                            if (m_blockvolume.SourceSize > 0)
                            {
                                lastVolumeSize = m_blockvolume.SourceSize;

                                if (m_options.Dryrun)
                                {
                                    m_result.AddDryrunMessage(string.Format("Would upload block volume: {0}, size: {1}", m_blockvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_blockvolume.LocalFilename).Length)));
                                    if (m_indexvolume != null)
                                    {
                                        m_blockvolume.Close();
                                        UpdateIndexVolume();
                                        m_indexvolume.FinishVolume(Library.Utility.Utility.CalculateHash(m_blockvolume.LocalFilename), new FileInfo(m_blockvolume.LocalFilename).Length);
                                        m_result.AddDryrunMessage(string.Format("Would upload index volume: {0}, size: {1}", m_indexvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_indexvolume.LocalFilename).Length)));
                                    }

                                    m_blockvolume.Dispose();
                                    m_blockvolume = null;
                                    if (m_indexvolume != null)
                                    {
                                        m_indexvolume.Dispose();
                                        m_indexvolume = null;
                                    }
                                }
                                else
                                {
                                    m_database.UpdateRemoteVolume(m_blockvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null, m_transaction);
                                    m_blockvolume.Close();
                                    UpdateIndexVolume();

                                    using(new Logging.Timer("CommitUpdateRemoteVolume"))
                                        m_transaction.Commit();
                                    m_transaction = m_database.BeginTransaction();

                                    m_backend.Put(m_blockvolume, m_indexvolume);

                                    m_blockvolume = null;
                                    m_indexvolume = null;
                                }
                            }
                            else
                            {
                                m_database.RemoveRemoteVolume(m_blockvolume.RemoteFilename, m_transaction);
                                if (m_indexvolume != null)
                                    m_database.RemoveRemoteVolume(m_indexvolume.RemoteFilename, m_transaction);
                            }
                        }

                        using(new Logging.Timer("UpdateChangeStatistics"))
                            m_database.UpdateChangeStatistics(m_result);
                        using(new Logging.Timer("VerifyConsistency"))
                            m_database.VerifyConsistency(m_transaction);

                        var changeCount =
                            m_result.AddedFiles + m_result.ModifiedFiles + m_result.DeletedFiles +
                            m_result.AddedFolders + m_result.ModifiedFolders + m_result.DeletedFolders +
                            m_result.AddedSymlinks + m_result.ModifiedSymlinks + m_result.DeletedSymlinks;

                        // Changes in the filelist trigger a filelist upload
                        if (m_options.UploadUnchangedBackups || changeCount > 0)
                        {
                            using(new Logging.Timer("Uploading a new fileset"))
                            {
                                if (!string.IsNullOrEmpty(m_options.ControlFiles))
                                    foreach(var p in m_options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries))
                                        m_filesetvolume.AddControlFile(p, m_options.GetCompressionHintFromFilename(p));

                                m_database.WriteFileset(m_filesetvolume, m_transaction);
                                m_filesetvolume.Close();

                                if (m_options.Dryrun)
                                    m_result.AddDryrunMessage(string.Format("Would upload fileset volume: {0}, size: {1}", m_filesetvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_filesetvolume.LocalFilename).Length)));
                                else
                                {
                                    m_database.UpdateRemoteVolume(m_filesetvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null, m_transaction);

                                    using(new Logging.Timer("CommitUpdateRemoteVolume"))
                                        m_transaction.Commit();
                                    m_transaction = m_database.BeginTransaction();

                                    m_backend.Put(m_filesetvolume);
                                }
                            }
                        }
                        else
                        {
                            m_result.AddVerboseMessage("Removing temp files, as no data needs to be uploaded");
                            m_database.RemoveRemoteVolume(m_filesetvolume.RemoteFilename, m_transaction);
                        }

                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload);
                        using(new Logging.Timer("Async backend wait"))
                            m_backend.WaitForComplete(m_database, m_transaction);

                        if (m_result.TaskControlRendevouz() != TaskControlState.Stop)
                        {
                            if (m_options.KeepTime.Ticks > 0 || m_options.KeepVersions != 0)
                            {
                                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Delete);
                                m_result.DeleteResults = new DeleteResults(m_result);
                                using(var db = new LocalDeleteDatabase(m_database))
                                    new DeleteHandler(m_backend.BackendUrl, m_options, (DeleteResults)m_result.DeleteResults).DoRun(db, m_transaction, true, lastVolumeSize <= m_options.SmallFileSize);

                            }
                            else if (lastVolumeSize <= m_options.SmallFileSize && !m_options.NoAutoCompact)
                            {
                                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Compact);
                                m_result.CompactResults = new CompactResults(m_result);
                                using(var db = new LocalDeleteDatabase(m_database))
                                    new CompactHandler(m_backend.BackendUrl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, m_transaction);
                            }
                        }

                        if (m_options.UploadVerificationFile)
                        {
                            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_VerificationUpload);
                            FilelistProcessor.UploadVerificationFile(m_backend.BackendUrl, m_options, m_result.BackendWriter, m_database, m_transaction);
                        }

                        if (m_options.Dryrun)
                        {
                            m_transaction.Rollback();
                            m_transaction = null;
                        }
                        else
                        {
                            using(new Logging.Timer("CommitFinalizingBackup"))
                                m_transaction.Commit();

                            m_transaction = null;
                            m_database.Vacuum();

                            if (m_result.TaskControlRendevouz() != TaskControlState.Stop && !m_options.NoBackendverification)
                            {
                                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupVerify);
                                using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
                                {
                                    using(new Logging.Timer("AfterBackupVerify"))
                                        FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter);
                                    backend.WaitForComplete(m_database, null);
                                }

                                if (m_options.BackupTestSampleCount > 0 && m_database.GetRemoteVolumes().Count() > 0)
                                {
                                    m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupTest);
                                    m_result.TestResults = new TestResults(m_result);

                                    using(var testdb = new LocalTestDatabase(m_database))
                                    using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, testdb))
                                        new TestHandler(m_backendurl, m_options, (TestResults)m_result.TestResults)
                                            .DoRun(m_options.BackupTestSampleCount, testdb, backend);
                                }
                            }

                        }

                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Complete);
                        m_database.WriteResults();
                        return;
                    }
                }
                catch (Exception ex)
                {
                    m_result.AddError("Fatal error", ex);
                    throw;
                }
                finally
                {
                    if (parallelScanner != null && parallelScanner.IsAlive)
                    {
                        parallelScanner.Abort();
                        parallelScanner.Join(500);
                        if (parallelScanner.IsAlive)
                            m_result.AddWarning("Failed to terminate filecounter thread", null);
                    }

                    if (m_snapshot != null)
                        try { m_snapshot.Dispose(); }
                        catch (Exception ex) { m_result.AddError("Failed to dispose snapshot", ex); }
                        finally { m_snapshot = null; }

                    if (m_transaction != null)
                        try { m_transaction.Rollback(); }
                        catch (Exception ex) { m_result.AddError(string.Format("Rollback error: {0}", ex.Message), ex); }
                }
            }
        }
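
The block above decides between discarding and persisting the local database changes: a dry run rolls the transaction back, while a real run commits and then optionally verifies the remote data. A minimal sketch of that commit-or-rollback step, assuming a plain ADO.NET IDbTransaction rather than the backup handler's own database wrapper:

using System.Data;

static class DryRunFinalizer
{
    // Finish a backup transaction: discard everything on a dry run, persist it otherwise.
    public static void Finish(IDbTransaction transaction, bool dryrun)
    {
        if (dryrun)
            transaction.Rollback();   // no changes are written, matching the dry-run branch above
        else
            transaction.Commit();     // make the recorded backup permanent
    }
}
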
Example #11
        /// <summary>
        /// Initiates creating a content/signature pair.
        /// </summary>
        /// <param name="full">True if the set is a full backup, false if it is incremental</param>
        /// <param name="options">Any setup options to use</param>
        public void InitiateMultiPassDiff(bool full, Options options)
        {
            if (full)
            {
                m_oldFolders = new Dictionary<string, DateTime>();
                m_oldSignatures = new Dictionary<string, ArchiveWrapper>();
                m_oldSymlinks = new Dictionary<string, string>();
            }

            m_newfiles = new Dictionary<string, string>();
            m_modifiedFiles = new Dictionary<string, string>();
            m_deletedfiles = new List<string>();
            m_newfolders = new List<KeyValuePair<string, DateTime>>();
            m_updatedfolders = new List<KeyValuePair<string, DateTime>>();
            m_deletedfolders = new List<string>();
            m_checkedUnchangedFiles = new List<string>();
            m_lastPartialFile = null;
            m_openfilepolicy = Options.OpenFileStrategy.Ignore;

            try
            {
                if (options.SnapShotStrategy != Options.OptimizationStrategy.Off)
                    m_snapshot = XervBackup.Library.Snapshots.SnapshotUtility.CreateSnapshot(m_sourcefolder, options.RawOptions);
            }
            catch (Exception ex)
            {
                if (options.SnapShotStrategy == Options.OptimizationStrategy.Required)
                    throw;
                else if (options.SnapShotStrategy == Options.OptimizationStrategy.On)
                {
                    if (m_stat != null)
                        m_stat.LogWarning(string.Format(Strings.RSyncDir.SnapshotFailedError, ex.ToString()), ex);
                }
            }

            //Failsafe, just use a plain implementation
            if (m_snapshot == null)
            {
                m_snapshot = Utility.Utility.IsClientLinux ?
                    (Library.Snapshots.ISnapshotService)new XervBackup.Library.Snapshots.NoSnapshotLinux(m_sourcefolder, options.RawOptions)
                        :
                    (Library.Snapshots.ISnapshotService)new XervBackup.Library.Snapshots.NoSnapshotWindows(m_sourcefolder, options.RawOptions);
                m_openfilepolicy = options.OpenFilePolicy;
            }

            Dictionary<string, Snapshots.USNHelper> usnHelpers = null;
            List<string> unchanged = new List<string>();
            m_unproccesed = new PathCollector(m_snapshot, options.SymlinkPolicy, options.FileAttributeFilter, m_filter, m_stat);

            try
            {
                if (options.UsnStrategy != Options.OptimizationStrategy.Off)
                {
                    if (Utility.Utility.IsClientLinux && options.UsnStrategy != Options.OptimizationStrategy.Auto)
                        throw new Exception(Strings.RSyncDir.UsnNotSupportedOnLinuxError);

                    /*
                    if (options.DisableUSNDiffCheck)
                        m_lastUSN = null;
                    */

                    usnHelpers = new Dictionary<string, XervBackup.Library.Snapshots.USNHelper>(Utility.Utility.ClientFilenameStringComparer);
                    foreach (string s in m_sourcefolder)
                    {
                        string rootFolder = System.IO.Path.GetPathRoot(s);
                        if (!usnHelpers.ContainsKey(rootFolder))
                            try { usnHelpers[rootFolder] = new XervBackup.Library.Snapshots.USNHelper(rootFolder); }
                            catch (Exception ex)
                            {
                                if (options.UsnStrategy == Options.OptimizationStrategy.Required)
                                    throw;
                                else if (options.UsnStrategy == Options.OptimizationStrategy.On)
                                {
                                    if (m_stat != null)
                                        m_stat.LogWarning(string.Format(Strings.RSyncDir.UsnFailedError, ex.ToString()), ex);
                                }
                            }

                        if (usnHelpers.ContainsKey(rootFolder))
                        {
                            //This code is broken, see issue 332:
                            //http://code.google.com/p/XervBackup/issues/detail?id=332

                            /* if (m_lastUSN != null && m_lastUSN.ContainsKey(rootFolder))
                            {
                                if (m_lastUSN[rootFolder].Key != usnHelpers[rootFolder].JournalID)
                                {
                                    if (m_stat != null)
                                        m_stat.LogWarning(string.Format(Strings.RSyncDir.UsnJournalIdChangedWarning, rootFolder, m_lastUSN[rootFolder].Key, usnHelpers[rootFolder].JournalID), null);

                                    //Just take it all again
                                    usnHelpers[rootFolder].EnumerateFilesAndFolders(s, m_filter, m_unproccesed.Callback);
                                }
                                else if (m_lastUSN[rootFolder].Value > usnHelpers[rootFolder].USN)
                                {
                                    if (m_stat != null)
                                        m_stat.LogWarning(string.Format(Strings.RSyncDir.UsnNumberingFaultWarning, rootFolder, m_lastUSN[rootFolder].Value, usnHelpers[rootFolder].USN), null);

                                    //Just take it all again
                                    usnHelpers[rootFolder].EnumerateFilesAndFolders(s, m_filter, m_unproccesed.Callback);
                                }
                                else //All good we rely on USN numbers to find a list of changed files
                                {
                                    //Find all changed files and folders
                                    Dictionary<string, string> tmp = new Dictionary<string, string>(Utility.Utility.ClientFilenameStringComparer);
                                    foreach (string sx in usnHelpers[rootFolder].GetChangedFileSystemEntries(s, m_lastUSN[rootFolder].Value))
                                    {
                                        tmp.Add(sx, null);

                                        if (!sx.EndsWith(System.IO.Path.DirectorySeparatorChar.ToString()))
                                            m_unproccesed.Files.Add(sx);
                                    }

                                    //Remove the existing unchanged ones
                                    foreach (string x in usnHelpers[rootFolder].GetFileSystemEntries(s))
                                        if (!tmp.ContainsKey(x))
                                            unchanged.Add(x);
                                }
                            }
                            else */
                            {
                                usnHelpers[rootFolder].EnumerateFilesAndFolders(s, m_unproccesed.Callback);
                            }
                        }

                    }

                    if (usnHelpers.Count > 0)
                    {
                        m_currentUSN = new USNRecord();
                        foreach (KeyValuePair<string, Snapshots.USNHelper> kx in usnHelpers)
                            m_currentUSN.Add(kx.Key, kx.Value.JournalID, kx.Value.USN);
                    }
                }
            }
            catch (Exception ex)
            {
                if (options.UsnStrategy == Options.OptimizationStrategy.Required)
                    throw;
                else if (options.UsnStrategy == Options.OptimizationStrategy.On)
                {
                    if (m_stat != null)
                        m_stat.LogWarning(string.Format(Strings.RSyncDir.UsnFailedError, ex.ToString()), ex);
                }

                //If we get here, something went really wrong with USN, so we disable it
                m_currentUSN = null;
                m_unproccesed = new PathCollector(m_snapshot, options.SymlinkPolicy, options.FileAttributeFilter, m_filter, m_stat);
                unchanged = null;
            }
            finally
            {
                if (usnHelpers != null)
                    foreach(Snapshots.USNHelper h in usnHelpers.Values)
                        try { h.Dispose(); }
                        catch (Exception ex)
                        {
                            if (m_stat != null)
                                m_stat.LogWarning(string.Format(Strings.RSyncDir.UsnDisposeFailedWarning, ex.ToString()), ex);
                        }
            }

            if (m_currentUSN == null)
            {
                m_snapshot.EnumerateFilesAndFolders(m_unproccesed.Callback);
            }
            else
            {
                //Skip all items that we know are unchanged
                foreach (string x in unchanged)
                {
                    string relpath = GetRelativeName(x);
                    m_oldSignatures.Remove(relpath);
                    m_oldFolders.Remove(relpath);
                    m_oldSymlinks.Remove(relpath);
                }

                //If some folders did not support USN, add their files now
                foreach (string s in m_sourcefolder)
                    if (!m_currentUSN.ContainsKey(System.IO.Path.GetPathRoot(s)))
                        m_snapshot.EnumerateFilesAndFolders(s, m_unproccesed.Callback);
            }

            m_totalfiles = m_unproccesed.Files.Count;
            m_isfirstmultipass = true;

            if (options.ExcludeEmptyFolders)
            {
                //We remove the folders that have no files.
                //It would be more efficient to exclude them from the list before this point,
                // but that would require rewriting the snapshot enumeration

                //We can't rely on the order of the folders, so we sort them to get the shortest folder names first
                m_unproccesed.Folders.Sort(Utility.Utility.ClientFilenameStringComparer);

                //We can't rely on the order of the files either, but sorting them allows us to use an O(log n) search rather than an O(n) scan
                m_unproccesed.Files.Sort(Utility.Utility.ClientFilenameStringComparer);

                for (int i = 0; i < m_unproccesed.Folders.Count; i++)
                {
                    string folder = m_unproccesed.Folders[i];
                    int ix = m_unproccesed.Files.BinarySearch(folder, Utility.Utility.ClientFilenameStringComparer);
                    if (ix >= 0)
                        continue; //Should not happen, means that a file has the same name as a folder

                    //Get the next index larger than the foldername
                    ix = ~ix;

                    if (ix >= m_unproccesed.Files.Count)
                    {
                        //No files matched
                        m_unproccesed.Folders.RemoveAt(i);
                        i--;
                    }
                    else
                    {
                        //If the element does not start with the foldername, no files from the folder are included
                        if (!m_unproccesed.Files[ix].StartsWith(folder, Utility.Utility.ClientFilenameStringComparision))
                        {
                            //Speedup, remove all subfolders as well without performing binary searches
                            while (i < m_unproccesed.Folders.Count && m_unproccesed.Folders[i].StartsWith(folder))
                                m_unproccesed.Folders.RemoveAt(i);

                            //We have removed at least one, so adjust the loop counter
                            i--;
                        }
                    }
                }
            }

            //Build folder diffs
            foreach(string s in m_unproccesed.Folders)
            {
                try
                {
                    string relpath = GetRelativeName(s);
                    if (relpath.Trim().Length != 0)
                    {
                        DateTime lastWrite = m_snapshot.GetLastWriteTime(s).ToUniversalTime();

                        //Cut off as we only have seconds stored
                        lastWrite = new DateTime(lastWrite.Year, lastWrite.Month, lastWrite.Day, lastWrite.Hour, lastWrite.Minute, lastWrite.Second, DateTimeKind.Utc);

                        if (!m_oldFolders.ContainsKey(relpath))
                            m_newfolders.Add(new KeyValuePair<string, DateTime>(relpath, lastWrite));
                        else
                        {
                            if (m_oldFolders[relpath] != lastWrite)
                                m_updatedfolders.Add(new KeyValuePair<string, DateTime>(relpath, lastWrite));
                            m_oldFolders.Remove(relpath);
                        }
                    }
                }
                catch (Exception ex)
                {
                    m_unproccesed.Errors.Add(s);
                    m_stat.LogError(string.Format(Strings.RSyncDir.FolderModificationTimeReadError, s, ex.Message), ex);
                }
            }

            m_unproccesed.Folders.Clear();
            foreach(string s in m_oldFolders.Keys)
                if (!m_unproccesed.IsAffectedByError(s))
                    m_deletedfolders.Add(s);

             //Build symlink diffs
            if (m_oldSymlinks.Count > 0)
            {
                for (int i = m_unproccesed.Symlinks.Count - 1; i >= 0; i--)
                {
                    string s = m_unproccesed.Symlinks[i].Key;
                    try
                    {
                        string relpath = GetRelativeName(s);
                        if (relpath.Trim().Length != 0)
                        {
                            string oldLink;
                            if (m_oldSymlinks.TryGetValue(relpath, out oldLink))
                            {
                                m_oldSymlinks.Remove(relpath);
                                if (string.Equals(oldLink, m_unproccesed.Symlinks[i].Value, Utility.Utility.ClientFilenameStringComparision))
                                {
                                    m_unproccesed.Symlinks.RemoveAt(i);
                                }
                            }
                        }
                    }
                    catch (Exception ex)
                    {
                        m_unproccesed.Errors.Add(s);
                        m_stat.LogError(string.Format(Strings.RSyncDir.SymlinkReadError, s, ex.Message), ex);
                    }
                }
            }

            m_deletedfiles.AddRange(m_oldSymlinks.Keys);
            m_oldSymlinks.Clear();

            m_sortedfilelist = options.SortedFilelist;
            if (m_sortedfilelist)
                m_unproccesed.Files.Sort(Utility.Utility.ClientFilenameStringComparer);
        }
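
The ExcludeEmptyFolders branch above removes folders that contain no files by sorting both lists and probing with List<T>.BinarySearch, so each folder lookup costs O(log n) instead of a linear scan. A standalone sketch of that pruning step (hypothetical helper; it assumes folder paths carry a trailing directory separator and a case-insensitive ordinal comparer, as on Windows):

using System;
using System.Collections.Generic;

static class EmptyFolderPruner
{
    // Remove every folder (and its subfolders) that contains no file. Both lists are sorted
    // with the same comparer; BinarySearch returns the bitwise complement of the insertion
    // point when the folder name itself is not present in the file list.
    public static void Prune(List<string> folders, List<string> files)
    {
        folders.Sort(StringComparer.OrdinalIgnoreCase);
        files.Sort(StringComparer.OrdinalIgnoreCase);

        for (int i = 0; i < folders.Count; i++)
        {
            var folder = folders[i];
            var ix = files.BinarySearch(folder, StringComparer.OrdinalIgnoreCase);
            if (ix >= 0)
                continue; // a file shares the folder's exact name; leave it alone

            ix = ~ix; // index of the first file sorting after the folder name

            // If no file starts with the folder prefix, the folder and its subfolders are empty
            if (ix >= files.Count || !files[ix].StartsWith(folder, StringComparison.OrdinalIgnoreCase))
            {
                while (i < folders.Count && folders[i].StartsWith(folder, StringComparison.OrdinalIgnoreCase))
                    folders.RemoveAt(i);
                i--; // compensate for removing the entry at the current index
            }
        }
    }
}
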
Example #12
        public void Dispose()
        {
            if (m_lastPartialFile != null)
            {
                try { m_lastPartialFile.Dispose(); }
                catch { }
                m_lastPartialFile = null;
            }

            if (m_patches != null)
            {
                foreach (Library.Interface.ICompression arc in m_patches)
                    try { arc.Dispose(); }
                    catch { }
                m_patches = null;
            }

            if (m_partialDeltas != null)
            {
                foreach (Utility.TempFile tf in m_partialDeltas.Values)
                    try
                    {
                        if (tf != null)
                            tf.Dispose();
                    }
                    catch { }
                m_partialDeltas = null;
            }

            if (m_stat is BackupStatistics)
            {
                BackupStatistics bs = m_stat as BackupStatistics;

                if (m_deletedfiles != null)
                    bs.DeletedFiles = m_deletedfiles.Count;
                if (m_deletedfolders != null)
                    bs.DeletedFolders = m_deletedfolders.Count;
                bs.ModifiedFiles = m_diffedfiles;
                bs.AddedFiles = m_addedfiles;
                bs.ExaminedFiles = m_examinedfiles;
                bs.OpenedFiles = m_filesopened;
                bs.SizeOfModifiedFiles = m_diffedfilessize;
                bs.SizeOfAddedFiles = m_addedfilessize;
                bs.SizeOfExaminedFiles = m_examinedfilesize;
                if (m_unproccesed != null && m_unproccesed.Files != null)
                    bs.UnprocessedFiles = m_unproccesed.Files.Count;
                if (m_newfolders != null)
                    bs.AddedFolders = m_newfolders.Count;
            }

            if (m_snapshot != null)
            {
                m_snapshot.Dispose();
                m_snapshot = null;
            }
        }
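
Dispose above applies the same defensive pattern to every resource: each Dispose call sits in its own try/catch, so one failure cannot prevent the remaining cleanup from running. A minimal sketch of that pattern (generic helper, not part of the library):

using System;
using System.Collections.Generic;

static class SafeCleanup
{
    // Dispose each resource independently; failures are swallowed so the remaining
    // resources are still released (the real code logs or reports them instead).
    public static void DisposeAll(IEnumerable<IDisposable> resources)
    {
        foreach (var resource in resources)
        {
            try { resource?.Dispose(); }
            catch { /* best-effort cleanup */ }
        }
    }
}
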
Example #13
 public PathCollector(Snapshots.ISnapshotService snapshot, Options.SymlinkStrategy symlinkHandling, FileAttributes attributeMask, Utility.FilenameFilter filter, CommunicationStatistics stat)
 {
     m_symlinkHandling = symlinkHandling;
     m_filter = filter;
     m_attributeMask = attributeMask;
     m_snapshot = snapshot;
     m_stat = stat;
 }
Example #14
        public static Task Run(Snapshots.ISnapshotService snapshot, Options options, BackupStatsCollector stats, BackupDatabase database)
        {
            return(AutomationExtensions.RunTask(
                       new
            {
                Input = Channels.ProcessedFiles.ForRead,
                ProgressChannel = Channels.ProgressEvents.ForWrite,
                Output = Channels.AcceptedChangedFile.ForWrite
            },

                       async self =>
            {
                var EMPTY_METADATA = Utility.WrapMetadata(new Dictionary <string, string>(), options);

                // Pre-cache the option variables here to simplify and
                // speed up repeated option access below

                var SKIPFILESLARGERTHAN = options.SkipFilesLargerThan;
                // Zero and max both indicate no size limit
                if (SKIPFILESLARGERTHAN == long.MaxValue)
                {
                    SKIPFILESLARGERTHAN = 0;
                }

                var DISABLEFILETIMECHECK = options.DisableFiletimeCheck;
                var CHECKFILETIMEONLY = options.CheckFiletimeOnly;
                var SKIPMETADATA = options.SkipMetadata;

                while (true)
                {
                    var e = await self.Input.ReadAsync();

                    long filestatsize = -1;
                    try
                    {
                        filestatsize = snapshot.GetFileSize(e.Path);
                    }
                    catch (Exception ex)
                    {
                        Logging.Log.WriteExplicitMessage(FILELOGTAG, "FailedToReadSize", ex, "Failed to read size of file: {0}", e.Path);
                    }

                    await stats.AddExaminedFile(filestatsize);

                    // Stop now if the file is too large
                    var tooLargeFile = SKIPFILESLARGERTHAN != 0 && filestatsize >= 0 && filestatsize > SKIPFILESLARGERTHAN;
                    if (tooLargeFile)
                    {
                        Logging.Log.WriteVerboseMessage(FILELOGTAG, "SkipCheckTooLarge", "Skipped checking file, because the size exceeds limit {0}", e.Path);
                        await self.ProgressChannel.WriteAsync(new ProgressEvent()
                        {
                            Filepath = e.Path, Length = filestatsize, Type = EventType.FileSkipped
                        });
                        continue;
                    }

                    // Invalid ID indicates a new file
                    var isNewFile = e.OldId < 0;

                    // If we disable the filetime check, we always assume that the file has changed
                    // Otherwise we check that the timestamps are different or if any of them are empty
                    var timestampChanged = DISABLEFILETIMECHECK || e.LastWrite != e.OldModified || e.LastWrite.Ticks == 0 || e.OldModified.Ticks == 0;

                    // Avoid generating a new metadata blob if the timestamp has not changed
                    // and we only check for timestamp changes
                    if (CHECKFILETIMEONLY && !timestampChanged && !isNewFile)
                    {
                        Logging.Log.WriteVerboseMessage(FILELOGTAG, "SkipCheckNoTimestampChange", "Skipped checking file, because timestamp was not updated {0}", e.Path);
                        try
                        {
                            await database.AddUnmodifiedAsync(e.OldId, e.LastWrite);
                        }
                        catch (Exception ex)
                        {
                            if (ex.IsRetiredException())
                            {
                                throw;
                            }
                            Logging.Log.WriteWarningMessage(FILELOGTAG, "FailedToAddFile", ex, "Failed while attempting to add unmodified file to database: {0}", e.Path);
                        }
                        await self.ProgressChannel.WriteAsync(new ProgressEvent()
                        {
                            Filepath = e.Path, Length = filestatsize, Type = EventType.FileSkipped
                        });
                        continue;
                    }

                    // If we have disabled the filetime check, we do not have the metadata info,
                    // but we want to know if the metadata is potentially changed
                    if (!isNewFile && DISABLEFILETIMECHECK)
                    {
                        var tp = await database.GetMetadataHashAndSizeForFileAsync(e.OldId);
                        if (tp != null)
                        {
                            e.OldMetaSize = tp.Item1;
                            e.OldMetaHash = tp.Item2;
                        }
                    }

                    // Compute current metadata
                    e.MetaHashAndSize = SKIPMETADATA ? EMPTY_METADATA : Utility.WrapMetadata(MetadataGenerator.GenerateMetadata(e.Path, e.Attributes, options, snapshot), options);
                    e.MetadataChanged = !SKIPMETADATA && (e.MetaHashAndSize.Blob.Length != e.OldMetaSize || e.MetaHashAndSize.FileHash != e.OldMetaHash);

                    // Check if the file is new, or something indicates a change
                    var filesizeChanged = filestatsize < 0 || e.LastFileSize < 0 || filestatsize != e.LastFileSize;
                    if (isNewFile || timestampChanged || filesizeChanged || e.MetadataChanged)
                    {
                        Logging.Log.WriteVerboseMessage(FILELOGTAG, "CheckFileForChanges", "Checking file for changes {0}, new: {1}, timestamp changed: {2}, size changed: {3}, metadatachanged: {4}, {5} vs {6}", e.Path, isNewFile, timestampChanged, filesizeChanged, e.MetadataChanged, e.LastWrite, e.OldModified);
                        await self.Output.WriteAsync(e);
                    }
                    else
                    {
                        Logging.Log.WriteVerboseMessage(FILELOGTAG, "SkipCheckNoMetadataChange", "Skipped checking file, because no metadata was updated {0}", e.Path);
                        try
                        {
                            await database.AddUnmodifiedAsync(e.OldId, e.LastWrite);
                        }
                        catch (Exception ex)
                        {
                            Logging.Log.WriteWarningMessage(FILELOGTAG, "FailedToAddFile", ex, "Failed while attempting to add unmodified file to database: {0}", e.Path);
                        }
                        await self.ProgressChannel.WriteAsync(new ProgressEvent()
                        {
                            Filepath = e.Path, Length = filestatsize, Type = EventType.FileSkipped
                        });
                    }
                }
            }));
        }
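
The decision above reduces to a single predicate: a file is re-examined when it is new, its timestamp changed, its size changed (or either size is unknown), or its metadata hash differs. A hypothetical pure helper summarizing that check (names are illustrative, not the library API):

static class ChangeDetection
{
    // Mirrors the filter above: unknown sizes (negative values) are treated as "changed"
    // so the file is re-examined rather than silently skipped.
    public static bool ShouldExamine(bool isNewFile, bool timestampChanged,
                                     long currentSize, long lastKnownSize, bool metadataChanged)
    {
        var sizeChanged = currentSize < 0 || lastKnownSize < 0 || currentSize != lastKnownSize;
        return isNewFile || timestampChanged || sizeChanged || metadataChanged;
    }
}
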
Example #15
        public static Task Run(Snapshots.ISnapshotService snapshot, Options options, BackupDatabase database, BackupStatsCollector stats, ITaskReader taskreader)
        {
            return(AutomationExtensions.RunTask(
                       new
            {
                Input = Channels.AcceptedChangedFile.ForRead,
                StreamBlockChannel = Channels.StreamBlock.ForWrite,
            },

                       async self =>
            {
                var blocksize = options.Blocksize;

                while (await taskreader.ProgressAsync)
                {
                    var e = await self.Input.ReadAsync();

                    try
                    {
                        var hint = options.GetCompressionHintFromFilename(e.Path);
                        var oldHash = e.OldId < 0 ? null : await database.GetFileHashAsync(e.OldId);

                        StreamProcessResult filestreamdata;

                        // Process metadata and actual data in parallel
                        var metatask =
                            Task.Run(async() =>
                        {
                            // If we have determined that metadata has not changed, just grab the ID
                            if (!e.MetadataChanged)
                            {
                                var res = await database.GetMetadataIDAsync(e.MetaHashAndSize.FileHash, e.MetaHashAndSize.Blob.Length);
                                if (res.Item1)
                                {
                                    return res.Item2;
                                }

                                Logging.Log.WriteWarningMessage(FILELOGTAG, "UnexpextedMetadataLookup", null, "Metadata was reported as not changed, but still requires being added?\nHash: {0}, Length: {1}, ID: {2}, Path: {3}", e.MetaHashAndSize.FileHash, e.MetaHashAndSize.Blob.Length, res.Item2, e.Path);
                                e.MetadataChanged = true;
                            }

                            return (await MetadataPreProcess.AddMetadataToOutputAsync(e.Path, e.MetaHashAndSize, database, self.StreamBlockChannel)).Item2;
                        });

                        using (var fs = snapshot.OpenRead(e.Path))
                            filestreamdata = await StreamBlock.ProcessStream(self.StreamBlockChannel, e.Path, fs, false, hint);

                        await stats.AddOpenedFile(filestreamdata.Streamlength);

                        var metadataid = await metatask;
                        var filekey = filestreamdata.Streamhash;
                        var filesize = filestreamdata.Streamlength;

                        if (oldHash != filekey)
                        {
                            if (oldHash == null)
                            {
                                Logging.Log.WriteVerboseMessage(FILELOGTAG, "NewFile", "New file {0}", e.Path);
                            }
                            else
                            {
                                Logging.Log.WriteVerboseMessage(FILELOGTAG, "ChangedFile", "File has changed {0}", e.Path);
                            }

                            if (e.OldId < 0)
                            {
                                await stats.AddAddedFile(filesize);

                                if (options.Dryrun)
                                {
                                    Logging.Log.WriteVerboseMessage(FILELOGTAG, "WoudlAddNewFile", "Would add new file {0}, size {1}", e.Path, Library.Utility.Utility.FormatSizeString(filesize));
                                }
                            }
                            else
                            {
                                await stats.AddModifiedFile(filesize);

                                if (options.Dryrun)
                                {
                                    Logging.Log.WriteVerboseMessage(FILELOGTAG, "WoudlAddChangedFile", "Would add changed file {0}, size {1}", e.Path, Library.Utility.Utility.FormatSizeString(filesize));
                                }
                            }

                            await database.AddFileAsync(e.Path, e.LastWrite, filestreamdata.Blocksetid, metadataid);
                        }
                        else if (e.MetadataChanged)
                        {
                            Logging.Log.WriteVerboseMessage(FILELOGTAG, "FileMetadataChanged", "File has only metadata changes {0}", e.Path);
                            await database.AddFileAsync(e.Path, e.LastWrite, filestreamdata.Blocksetid, metadataid);
                        }
                        else /*if (e.OldId >= 0)*/
                        {
                            // When we write the file to output, update the last modified time
                            Logging.Log.WriteVerboseMessage(FILELOGTAG, "NoFileChanges", "File has not changed {0}", e.Path);

                            try
                            {
                                await database.AddUnmodifiedAsync(e.OldId, e.LastWrite);
                            }
                            catch (Exception ex)
                            {
                                Logging.Log.WriteWarningMessage(FILELOGTAG, "FailedToAddFile", ex, "Failed while attempting to add unmodified file to database: {0}", e.Path);
                            }
                        }
                    }
                    catch (Exception ex)
                    {
                        if (ex.IsRetiredException())
                        {
                            return;
                        }
                        else
                        {
                            Logging.Log.WriteWarningMessage(FILELOGTAG, "PathProcessingFailed", ex, "Failed to process path: {0}", e.Path);
                        }
                    }
                }
            }
                       ));
        }
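
The handler above overlaps two pieces of work per file: the metadata lookup-or-store runs as a background task while the file contents are streamed and hashed, and both results are joined before the file entry is recorded. A minimal sketch of that overlap, assuming generic delegates in place of the channel-based helpers used here:

using System;
using System.Threading.Tasks;

static class ParallelFileProcessing
{
    // Start the metadata work, hash the content on the current async flow,
    // then await the metadata task so both results are available together.
    public static async Task<(string ContentHash, long MetadataId)> ProcessAsync(
        Func<Task<long>> storeMetadataAsync,
        Func<Task<string>> hashContentAsync)
    {
        var metadataTask = Task.Run(storeMetadataAsync); // runs concurrently with the hashing below
        var contentHash = await hashContentAsync();
        var metadataId = await metadataTask;             // join before recording the file
        return (contentHash, metadataId);
    }
}
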
Example #16
        /// <summary>
        /// Performs the bulk of work by starting all relevant processes
        /// </summary>
        private static async Task RunMainOperation(IEnumerable <string> sources, Snapshots.ISnapshotService snapshot, UsnJournalService journalService, Backup.BackupDatabase database, Backup.BackupStatsCollector stats, Options options, IFilter sourcefilter, IFilter filter, BackupResults result, Common.ITaskReader taskreader, long lastfilesetid)
        {
            using (new Logging.Timer(LOGTAG, "BackupMainOperation", "BackupMainOperation"))
            {
                // Make sure the CompressionHints table is initialized, otherwise all workers will initialize it
                var tb = options.CompressionHints.Count;

                Task all;
                using (new ChannelScope())
                {
                    all = Task.WhenAll(
                        new []
                    {
                        Backup.DataBlockProcessor.Run(database, options, taskreader),
                        Backup.FileBlockProcessor.Run(snapshot, options, database, stats, taskreader),
                        Backup.StreamBlockSplitter.Run(options, database, taskreader),
                        Backup.FileEnumerationProcess.Run(sources, snapshot, journalService, options.FileAttributeFilter, sourcefilter, filter, options.SymlinkPolicy, options.HardlinkPolicy, options.ExcludeEmptyFolders, options.IgnoreFilenames, options.ChangedFilelist, taskreader),
                        Backup.FilePreFilterProcess.Run(snapshot, options, stats, database),
                        Backup.MetadataPreProcess.Run(snapshot, options, database, lastfilesetid),
                        Backup.SpillCollectorProcess.Run(options, database, taskreader),
                        Backup.ProgressHandler.Run(result)
                    }
                        // Spawn additional block hashers
                        .Union(
                            Enumerable.Range(0, options.ConcurrencyBlockHashers - 1).Select(x => Backup.StreamBlockSplitter.Run(options, database, taskreader))
                            )
                        // Spawn additional compressors
                        .Union(
                            Enumerable.Range(0, options.ConcurrencyCompressors - 1).Select(x => Backup.DataBlockProcessor.Run(database, options, taskreader))
                            )
                        );
                }

                await all.ConfigureAwait(false);

                if (options.ChangedFilelist != null && options.ChangedFilelist.Length >= 1)
                {
                    await database.AppendFilesFromPreviousSetAsync(options.DeletedFilelist);
                }
                else if (journalService != null)
                {
                    // append files from previous fileset, unless part of modifiedSources, which we've just scanned
                    await database.AppendFilesFromPreviousSetWithPredicateAsync((path, fileSize) =>
                    {
                        if (journalService.IsPathEnumerated(path))
                        {
                            return(true);
                        }

                        if (fileSize >= 0)
                        {
                            stats.AddExaminedFile(fileSize);
                        }
                        return(false);
                    });

                    // store journal data in database
                    var data = journalService.VolumeDataList.Where(p => p.JournalData != null).Select(p => p.JournalData).ToList();
                    if (data.Any())
                    {
                        // always record change journal data for current fileset (entry may be dropped later if nothing is uploaded)
                        await database.CreateChangeJournalDataAsync(data);

                        // update the previous fileset's change journal entry to resume at this point in case nothing was backed up
                        await database.UpdateChangeJournalDataAsync(data, lastfilesetid);
                    }
                }

                result.OperationProgressUpdater.UpdatefileCount(result.ExaminedFiles, result.SizeOfExaminedFiles, true);
            }
        }
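
RunMainOperation above wires up one copy of each fixed pipeline stage plus a configurable number of extra block hashers and compressors, then awaits them all with Task.WhenAll. A simplified sketch of that fan-out, assuming plain task factories in place of the CoCoL channel processes:

using System;
using System.Linq;
using System.Threading.Tasks;

static class PipelineFanOut
{
    // Start one copy of each fixed stage and extraWorkers extra copies of the scalable stage
    // (mirroring ConcurrencyBlockHashers/ConcurrencyCompressors), completing when all finish.
    public static Task RunAsync(Func<Task>[] fixedStages, Func<Task> scalableStage, int extraWorkers)
    {
        var tasks = fixedStages.Select(start => start())
            .Concat(Enumerable.Range(0, extraWorkers).Select(_ => scalableStage()));
        return Task.WhenAll(tasks);
    }
}
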
Example #17
        public static async Task <Dictionary <string, string> > GenerateMetadataAsync(string path, System.IO.FileAttributes attributes, Options options, Snapshots.ISnapshotService snapshot)
        {
            try
            {
                Dictionary <string, string> metadata;

                if (!options.SkipMetadata)
                {
                    metadata = snapshot.GetMetadata(path, snapshot.IsSymlink(path, attributes), options.SymlinkPolicy == Options.SymlinkStrategy.Follow);
                    if (metadata == null)
                    {
                        metadata = new Dictionary <string, string>();
                    }

                    if (!metadata.ContainsKey("CoreAttributes"))
                    {
                        metadata["CoreAttributes"] = attributes.ToString();
                    }

                    if (!metadata.ContainsKey("CoreLastWritetime"))
                    {
                        try
                        {
                            metadata["CoreLastWritetime"] = snapshot.GetLastWriteTimeUtc(path).Ticks.ToString();
                        }
                        catch (Exception ex)
                        {
                            Logging.Log.WriteWarningMessage(METALOGTAG, "TimestampReadFailed", ex, "Failed to read timestamp on \"{0}\"", path);
                        }
                    }

                    if (!metadata.ContainsKey("CoreCreatetime"))
                    {
                        try
                        {
                            metadata["CoreCreatetime"] = snapshot.GetCreationTimeUtc(path).Ticks.ToString();
                        }
                        catch (Exception ex)
                        {
                            Logging.Log.WriteWarningMessage(METALOGTAG, "TimestampReadFailed", ex, "Failed to read timestamp on \"{0}\"", path);
                        }
                    }
                }
                else
                {
                    metadata = new Dictionary <string, string>();
                }

                return(metadata);
            }
            catch (Exception ex)
            {
                Logging.Log.WriteWarningMessage(METALOGTAG, "MetadataProcessFailed", ex, "Failed to process metadata for \"{0}\", storing empty metadata", path);
                return(new Dictionary <string, string>());
            }
        }
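
GenerateMetadataAsync above always guarantees the CoreAttributes, CoreLastWritetime and CoreCreatetime keys, storing timestamps as UTC ticks rendered to strings. A minimal sketch building the same keys from plain System.IO calls, without a snapshot service (illustrative only):

using System.Collections.Generic;
using System.IO;

static class BasicMetadata
{
    // Build the baseline metadata entries used above; values are strings so they can be
    // serialized into the metadata blob unchanged.
    public static Dictionary<string, string> ForFile(string path)
    {
        var info = new FileInfo(path);
        return new Dictionary<string, string>
        {
            ["CoreAttributes"] = info.Attributes.ToString(),
            ["CoreLastWritetime"] = info.LastWriteTimeUtc.Ticks.ToString(),
            ["CoreCreatetime"] = info.CreationTimeUtc.Ticks.ToString(),
        };
    }
}
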
Example #18
 public FilterHandler(Snapshots.ISnapshotService snapshot, FileAttributes attributeFilter, Duplicati.Library.Utility.IFilter sourcefilter, Duplicati.Library.Utility.IFilter filter, Options.SymlinkStrategy symlinkPolicy, Options.HardlinkStrategy hardlinkPolicy, ILogWriter logWriter)
 {
     m_snapshot = snapshot;
     m_attributeFilter = attributeFilter;
     m_sourcefilter = sourcefilter;
     m_filter = filter;
     m_symlinkPolicy = symlinkPolicy;
     m_hardlinkPolicy = hardlinkPolicy;
     m_logWriter = logWriter;
     m_hardlinkmap = new Dictionary<string, string>();
 }