Example #1
        /// <summary>
        /// Performs the bulk of work by starting all relevant processes
        /// </summary>
        private static async Task RunMainOperation(IEnumerable<string> sources, Snapshots.ISnapshotService snapshot, UsnJournalService journalService, Backup.BackupDatabase database, Backup.BackupStatsCollector stats, Options options, IFilter sourcefilter, IFilter filter, BackupResults result, Common.ITaskReader taskreader, long lastfilesetid)
        {
            using (new Logging.Timer(LOGTAG, "BackupMainOperation", "BackupMainOperation"))
            {
                // Make sure the CompressionHints table is initialized, otherwise all workers will initialize it
                var tb = options.CompressionHints.Count;

                Task all;
                using (new ChannelScope())
                {
                    all = Task.WhenAll(
                        new []
                    {
                        Backup.DataBlockProcessor.Run(database, options, taskreader),
                        Backup.FileBlockProcessor.Run(snapshot, options, database, stats, taskreader),
                        Backup.StreamBlockSplitter.Run(options, database, taskreader),
                        Backup.FileEnumerationProcess.Run(sources, snapshot, journalService, options.FileAttributeFilter, sourcefilter, filter, options.SymlinkPolicy, options.HardlinkPolicy, options.ExcludeEmptyFolders, options.IgnoreFilenames, options.ChangedFilelist, taskreader),
                        Backup.FilePreFilterProcess.Run(snapshot, options, stats, database),
                        Backup.MetadataPreProcess.Run(snapshot, options, database, lastfilesetid),
                        Backup.SpillCollectorProcess.Run(options, database, taskreader),
                        Backup.ProgressHandler.Run(result)
                    }
                        // Spawn additional block hashers
                        .Union(
                            Enumerable.Range(0, options.ConcurrencyBlockHashers - 1).Select(x => Backup.StreamBlockSplitter.Run(options, database, taskreader))
                            )
                        // Spawn additional compressors
                        .Union(
                            Enumerable.Range(0, options.ConcurrencyCompressors - 1).Select(x => Backup.DataBlockProcessor.Run(database, options, taskreader))
                            )
                        );
                }

                await all.ConfigureAwait(false);

                if (options.ChangedFilelist != null && options.ChangedFilelist.Length >= 1)
                {
                    await database.AppendFilesFromPreviousSetAsync(options.DeletedFilelist);
                }
                else if (journalService != null)
                {
                    // append files from previous fileset, unless part of modifiedSources, which we've just scanned
                    await database.AppendFilesFromPreviousSetWithPredicateAsync((path, fileSize) =>
                    {
                        if (journalService.IsPathEnumerated(path))
                        {
                            return true;
                        }

                        if (fileSize >= 0)
                        {
                            stats.AddExaminedFile(fileSize);
                        }
                        return false;
                    });

                    // store journal data in database
                    var data = journalService.VolumeDataList.Where(p => p.JournalData != null).Select(p => p.JournalData).ToList();
                    if (data.Any())
                    {
                        // always record change journal data for current fileset (entry may be dropped later if nothing is uploaded)
                        await database.CreateChangeJournalDataAsync(data);

                        // update the previous fileset's change journal entry to resume at this point in case nothing was backed up
                        await database.UpdateChangeJournalDataAsync(data, lastfilesetid);
                    }
                }

                result.OperationProgressUpdater.UpdatefileCount(result.ExaminedFiles, result.SizeOfExaminedFiles, true);
            }
        }
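A note on the worker fan-out above: the base set of pipeline tasks is a plain array, and the extra block hashers and compressors are added by unioning it with Enumerable.Range(0, N - 1).Select(...), so one worker of each kind always runs and N - 1 additional copies are started on top. Below is a minimal, self-contained sketch of that pattern using only the BCL; RunWorkerAsync is a hypothetical stand-in for a stage such as Backup.DataBlockProcessor.Run and is not part of Duplicati.

    using System;
    using System.Linq;
    using System.Threading.Tasks;

    class FanOutSketch
    {
        // Hypothetical stand-in for a pipeline stage such as Backup.DataBlockProcessor.Run
        static async Task RunWorkerAsync(string name)
        {
            await Task.Delay(100);              // simulate work
            Console.WriteLine($"{name} done");
        }

        static async Task Main()
        {
            var extraCompressors = 3;           // e.g. options.ConcurrencyCompressors - 1

            var all = Task.WhenAll(
                new[]
                {
                    RunWorkerAsync("base compressor"),
                    RunWorkerAsync("enumerator"),
                }
                // Start additional workers of the same kind, mirroring the
                // Enumerable.Range(...).Select(...) calls in the example above
                .Union(Enumerable.Range(0, extraCompressors)
                    .Select(i => RunWorkerAsync($"extra compressor {i}"))));

            await all;
        }
    }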
Example #2
        public static Task Run(Snapshots.ISnapshotService snapshot, BackupResults result, Options options, IFilter sourcefilter, IFilter filter, Common.ITaskReader taskreader, System.Threading.CancellationToken token)
        {
            // Make sure we create the enumeration process in a separate scope,
            // but keep the log channel from the parent scope
            using (Logging.Log.StartIsolatingScope())
                using (new IsolatedChannelScope())
                {
                    var enumeratorTask = Backup.FileEnumerationProcess.Run(snapshot, options.FileAttributeFilter, sourcefilter, filter, options.SymlinkPolicy, options.HardlinkPolicy, options.ExcludeEmptyFolders, options.IgnoreFilenames, options.ChangedFilelist, taskreader);
                    var counterTask = AutomationExtensions.RunTask(new
                    {
                        Input = Backup.Channels.SourcePaths.ForRead
                    },
                    async self =>
                    {
                        var count = 0L;
                        var size  = 0L;

                        try
                        {
                            while (await taskreader.ProgressAsync && !token.IsCancellationRequested)
                            {
                                var path = await self.Input.ReadAsync();

                                count++;

                                try
                                {
                                    size += snapshot.GetFileSize(path);
                                }
                                catch
                                {
                                    // Ignore files whose size cannot be read; the totals are only for progress reporting
                                }

                                result.OperationProgressUpdater.UpdatefileCount(count, size, false);
                            }
                        }
                        finally
                        {
                            result.OperationProgressUpdater.UpdatefileCount(count, size, true);
                        }
                    });

                    return Task.WhenAll(enumeratorTask, counterTask);
                }
        }
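The counter task above drains Backup.Channels.SourcePaths (a CoCoL channel) purely to pre-compute the file count and total size for progress reporting, and its try/finally pushes the final totals even if the loop is cut short. The following is a rough stand-alone analogue of that producer/consumer counting pattern, built on System.Threading.Channels rather than CoCoL; the producer and file names are made up for illustration.

    using System;
    using System.IO;
    using System.Threading.Channels;
    using System.Threading.Tasks;

    class CounterSketch
    {
        static async Task Main()
        {
            var paths = Channel.CreateUnbounded<string>();

            // Producer: stands in for the file enumeration writing into SourcePaths
            var producer = Task.Run(async () =>
            {
                foreach (var p in new[] { "a.txt", "b.txt", "missing.txt" })
                    await paths.Writer.WriteAsync(p);
                paths.Writer.Complete();
            });

            var count = 0L;
            var size = 0L;
            try
            {
                // Consumer: count entries and sum sizes, ignoring unreadable files
                await foreach (var path in paths.Reader.ReadAllAsync())
                {
                    count++;
                    try { size += new FileInfo(path).Length; }
                    catch { /* unreadable or missing file; keep counting */ }
                }
            }
            finally
            {
                // Mirror the finally block above: always report the final totals
                Console.WriteLine($"counted {count} files, {size} bytes");
            }

            await producer;
        }
    }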
Example #3
        public static Task Run(Common.BackendHandler backend, Options options, Common.DatabaseCommon database, BackupResults results, Common.ITaskReader taskreader, StatsCollector stats)
        {
            return AutomationExtensions.RunTask(new
            {
                Input = Channels.BackendRequest.ForRead,
            },
            async self =>
            {
                var inProgress = new Queue<KeyValuePair<int, Task>>();
                var max_pending = options.AsynchronousUploadLimit == 0 ? long.MaxValue : options.AsynchronousUploadLimit;
                var noIndexFiles = options.IndexfilePolicy == Options.IndexFileStrategy.None;
                var active = 0;
                var lastSize = -1L;

                while (!await self.Input.IsRetiredAsync && await taskreader.ProgressAsync)
                {
                    try
                    {
                        var req = await self.Input.ReadAsync();

                        if (!await taskreader.ProgressAsync)
                        {
                            continue;
                        }

                        var task = default(KeyValuePair<int, Task>);
                        if (req is VolumeUploadRequest)
                        {
                            lastSize = ((VolumeUploadRequest)req).BlockVolume.SourceSize;

                            if (noIndexFiles || ((VolumeUploadRequest)req).IndexVolume == null)
                            {
                                task = new KeyValuePair<int, Task>(1, backend.UploadFileAsync(((VolumeUploadRequest)req).BlockVolume, null));
                            }
                            else
                            {
                                task = new KeyValuePair<int, Task>(2, backend.UploadFileAsync(((VolumeUploadRequest)req).BlockVolume, name => ((VolumeUploadRequest)req).IndexVolume.CreateVolume(name, options, database)));
                            }
                        }
                        else if (req is FilesetUploadRequest)
                        {
                            task = new KeyValuePair<int, Task>(1, backend.UploadFileAsync(((FilesetUploadRequest)req).Fileset));
                        }
                        else if (req is IndexVolumeUploadRequest)
                        {
                            task = new KeyValuePair<int, Task>(1, backend.UploadFileAsync(((IndexVolumeUploadRequest)req).IndexVolume));
                        }
                        else if (req is FlushRequest)
                        {
                            try
                            {
                                while (inProgress.Count > 0)
                                {
                                    await inProgress.Dequeue().Value;
                                }
                                active = 0;
                            }
                            finally
                            {
                                ((FlushRequest)req).SetFlushed(lastSize);
                            }
                        }

                        if (task.Value != null)
                        {
                            inProgress.Enqueue(task);
                            active += task.Key;
                        }
                    }
                    catch (Exception ex)
                    {
                        if (!ex.IsRetiredException())
                        {
                            throw;
                        }
                    }

                    while (active >= max_pending)
                    {
                        var top = inProgress.Dequeue();

                        // See if we are done
                        if (await Task.WhenAny(top.Value, Task.Delay(500)) != top.Value)
                        {
                            try
                            {
                                stats.SetBlocking(true);
                                await top.Value;
                            }
                            finally
                            {
                                stats.SetBlocking(false);
                            }
                        }

                        active -= top.Key;
                    }
                }

                results.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload);

                try
                {
                    stats.SetBlocking(true);
                    while (inProgress.Count > 0)
                    {
                        await inProgress.Dequeue().Value;
                    }
                }
                finally
                {
                    stats.SetBlocking(false);
                }
            });
        }
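The throttling loop above is worth calling out: once the queued uploads reach the asynchronous upload limit, it dequeues the oldest task and probes it with Task.WhenAny(task, Task.Delay(500)); if the delay wins, the code really is blocked on the backend and reports it via stats.SetBlocking before awaiting. A small self-contained sketch of that probe-then-block pattern, with a hypothetical FakeUploadAsync in place of backend.UploadFileAsync:

    using System;
    using System.Collections.Generic;
    using System.Threading.Tasks;

    class UploadThrottleSketch
    {
        // Hypothetical upload; the real code enqueues backend.UploadFileAsync tasks
        static Task FakeUploadAsync(int id) => Task.Delay(400 * (id + 1));

        static async Task Main()
        {
            var inProgress = new Queue<Task>();
            for (var i = 0; i < 5; i++)
                inProgress.Enqueue(FakeUploadAsync(i));

            const int maxPending = 2;

            // Drain the queue until fewer than maxPending uploads remain
            while (inProgress.Count >= maxPending)
            {
                var top = inProgress.Dequeue();

                // Probe: give the oldest upload 500 ms to finish on its own
                if (await Task.WhenAny(top, Task.Delay(500)) != top)
                {
                    // The delay won, so we are genuinely blocked on this upload;
                    // the example above wraps this await in stats.SetBlocking(true/false)
                    Console.WriteLine("blocking on upload");
                    await top;
                    Console.WriteLine("upload finished");
                }
            }
        }
    }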
Example #4
        public static Task Run(BackupResults result, BackupDatabase db, Options options, FilesetVolumeWriter filesetvolume, long filesetid, Common.ITaskReader taskreader)
        {
            return AutomationExtensions.RunTask(new
            {
                Output = Channels.BackendRequest.ForWrite,
            },
            async self =>
            {
                if (!await taskreader.ProgressAsync)
                {
                    return;
                }

                // Update the reported source and backend changes
                using (new Logging.Timer(LOGTAG, "UpdateChangeStatistics", "UpdateChangeStatistics"))
                    await db.UpdateChangeStatisticsAsync(result);

                var changeCount =
                    result.AddedFiles + result.ModifiedFiles + result.DeletedFiles +
                    result.AddedFolders + result.ModifiedFolders + result.DeletedFolders +
                    result.AddedSymlinks + result.ModifiedSymlinks + result.DeletedSymlinks;

                // Changes in the filelist trigger a filelist upload
                if (options.UploadUnchangedBackups || changeCount > 0)
                {
                    using (new Logging.Timer(LOGTAG, "UploadNewFileset", "Uploading a new fileset"))
                    {
                        if (!string.IsNullOrEmpty(options.ControlFiles))
                        {
                            foreach (var p in options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries))
                            {
                                filesetvolume.AddControlFile(p, options.GetCompressionHintFromFilename(p));
                            }
                        }

                        if (!await taskreader.ProgressAsync)
                        {
                            return;
                        }

                        await db.WriteFilesetAsync(filesetvolume, filesetid);
                        filesetvolume.Close();

                        if (!await taskreader.ProgressAsync)
                        {
                            return;
                        }

                        await db.UpdateRemoteVolumeAsync(filesetvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null);
                        await db.CommitTransactionAsync("CommitUpdateRemoteVolume");
                        await self.Output.WriteAsync(new FilesetUploadRequest(filesetvolume));
                    }
                }
                else
                {
                    Logging.Log.WriteVerboseMessage(LOGTAG, "RemovingLeftoverTempFile", "removing temp files, as no data needs to be uploaded");
                    await db.RemoveRemoteVolumeAsync(filesetvolume.RemoteFilename);
                }

                await db.CommitTransactionAsync("CommitUpdateRemoteVolume");
            });
        }
Example #5
        /// <summary>
        /// Performs the bulk of work by starting all relevant processes
        /// </summary>
        private static async Task RunMainOperation(Snapshots.ISnapshotService snapshot, Backup.BackupDatabase database, Backup.BackupStatsCollector stats, Options options, IFilter sourcefilter, IFilter filter, BackupResults result, Common.ITaskReader taskreader, long lastfilesetid)
        {
            using (new Logging.Timer(LOGTAG, "BackupMainOperation", "BackupMainOperation"))
            {
                // Make sure the CompressionHints table is initialized, otherwise all workers will initialize it
                var tb = options.CompressionHints.Count;

                Task all;
                using (new ChannelScope())
                {
                    all = Task.WhenAll(
                        new []
                    {
                        Backup.DataBlockProcessor.Run(database, options, taskreader),
                        Backup.FileBlockProcessor.Run(snapshot, options, database, stats, taskreader),
                        Backup.StreamBlockSplitter.Run(options, database, taskreader),
                        Backup.FileEnumerationProcess.Run(snapshot, options.FileAttributeFilter, sourcefilter, filter, options.SymlinkPolicy, options.HardlinkPolicy, options.ExcludeEmptyFolders, options.IgnoreFilenames, options.ChangedFilelist, taskreader),
                        Backup.FilePreFilterProcess.Run(snapshot, options, stats, database),
                        Backup.MetadataPreProcess.Run(snapshot, options, database, lastfilesetid),
                        Backup.SpillCollectorProcess.Run(options, database, taskreader),
                        Backup.ProgressHandler.Run(result)
                    }
                        // Spawn additional block hashers
                        .Union(
                            Enumerable.Range(0, options.ConcurrencyBlockHashers - 1).Select(x => Backup.StreamBlockSplitter.Run(options, database, taskreader))
                            )
                        // Spawn additional compressors
                        .Union(
                            Enumerable.Range(0, options.ConcurrencyCompressors - 1).Select(x => Backup.DataBlockProcessor.Run(database, options, taskreader))
                            )
                        );
                }

                await all;

                if (options.ChangedFilelist != null && options.ChangedFilelist.Length >= 1)
                {
                    await database.AppendFilesFromPreviousSetAsync(options.DeletedFilelist);
                }

                result.OperationProgressUpdater.UpdatefileCount(result.ExaminedFiles, result.SizeOfExaminedFiles, true);
            }
        }
        public static Task Run(Snapshots.ISnapshotService snapshot, FileAttributes attributeFilter, Duplicati.Library.Utility.IFilter sourcefilter, Duplicati.Library.Utility.IFilter emitfilter, Options.SymlinkStrategy symlinkPolicy, Options.HardlinkStrategy hardlinkPolicy, bool excludeemptyfolders, string[] ignorenames, string[] changedfilelist, Common.ITaskReader taskreader)
        {
            return AutomationExtensions.RunTask(new
            {
                Output = Backup.Channels.SourcePaths.ForWrite
            },
            async self =>
            {
                var hardlinkmap = new Dictionary<string, string>();
                var mixinqueue = new Queue<string>();
                Duplicati.Library.Utility.IFilter enumeratefilter = emitfilter;

                bool includes;
                bool excludes;
                Library.Utility.FilterExpression.AnalyzeFilters(emitfilter, out includes, out excludes);
                if (includes && !excludes)
                {
                    enumeratefilter = Library.Utility.FilterExpression.Combine(emitfilter, new Duplicati.Library.Utility.FilterExpression("*" + System.IO.Path.DirectorySeparatorChar, true));
                }

                // Simplify checking for an empty list
                if (ignorenames != null && ignorenames.Length == 0)
                {
                    ignorenames = null;
                }

                // If we have a specific list, use that instead of enumerating the filesystem
                IEnumerable<string> worklist;
                if (changedfilelist != null && changedfilelist.Length > 0)
                {
                    worklist = changedfilelist.Where(x =>
                    {
                        var fa = FileAttributes.Normal;
                        try
                        {
                            fa = snapshot.GetAttributes(x);
                        }
                        catch
                        {
                            // If the attributes cannot be read, keep the FileAttributes.Normal fallback
                        }

                        return AttributeFilterAsync(null, x, fa, snapshot, sourcefilter, hardlinkPolicy, symlinkPolicy, hardlinkmap, attributeFilter, enumeratefilter, ignorenames, mixinqueue).WaitForTask().Result;
                    });
                }
                else
                {
                    worklist = snapshot.EnumerateFilesAndFolders((root, path, attr) =>
                    {
                        return AttributeFilterAsync(root, path, attr, snapshot, sourcefilter, hardlinkPolicy, symlinkPolicy, hardlinkmap, attributeFilter, enumeratefilter, ignorenames, mixinqueue).WaitForTask().Result;
                    }, (rootpath, path, ex) =>
                    {
                        Logging.Log.WriteWarningMessage(FILTER_LOGTAG, "FileAccessError", ex, "Error reported while accessing file: {0}", path);
                    });
                }

                var source = ExpandWorkList(worklist, mixinqueue, emitfilter, enumeratefilter);
                if (excludeemptyfolders)
                {
                    source = ExcludeEmptyFolders(source);
                }

                // Process each path, and dequeue the mixins with symlinks as we go
                foreach (var s in source)
                {
                    if (!await taskreader.ProgressAsync)
                    {
                        return;
                    }

                    await self.Output.WriteAsync(s);
                }
            });
        }
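One detail of the enumeration setup above deserves a note: AnalyzeFilters classifies the emit filter, and when it contains only include rules, the filter used during enumeration is widened with a "*" + directory-separator expression so that folders are still traversed (otherwise an include such as *.docx would never match a directory, and nothing below it would be visited). Here is a small sketch of that step in isolation, assuming a project that references Duplicati.Library.Utility and using a hypothetical include-only filter:

    using Duplicati.Library.Utility;

    // Hypothetical include-only filter: keep only *.docx files
    IFilter emitfilter = new FilterExpression("*.docx", true);
    IFilter enumeratefilter = emitfilter;

    FilterExpression.AnalyzeFilters(emitfilter, out var includes, out var excludes);
    if (includes && !excludes)
    {
        // Also match directories during enumeration so the include rules
        // get a chance to match the files inside them
        enumeratefilter = FilterExpression.Combine(emitfilter,
            new FilterExpression("*" + System.IO.Path.DirectorySeparatorChar, true));
    }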