public static Task Run(BackupDatabase database, Options options, BackupResults result, ITaskReader taskreader)
        {
            return(AutomationExtensions.RunTask(new
            {
                UploadChannel = Channels.BackendRequest.ForWrite
            },

                                                async self =>
            {
                if (options.IndexfilePolicy != Options.IndexFileStrategy.None)
                {
                    foreach (var blockfile in await database.GetMissingIndexFilesAsync())
                    {
                        if (!await taskreader.ProgressAsync)
                        {
                            return;
                        }

                        Logging.Log.WriteInformationMessage(LOGTAG, "RecreateMissingIndexFile", "Re-creating missing index file for {0}", blockfile);
                        var w = await Common.IndexVolumeCreator.CreateIndexVolume(blockfile, options, database);

                        if (!await taskreader.ProgressAsync)
                        {
                            return;
                        }

                        await database.UpdateRemoteVolumeAsync(w.RemoteFilename, RemoteVolumeState.Uploading, -1, null);
                        await self.UploadChannel.WriteAsync(new IndexVolumeUploadRequest(w));
                    }
                }
            }));
        }
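
The example above is a producer: it re-creates each missing index file, marks it as Uploading in the database, and hands an IndexVolumeUploadRequest to the upload channel, checking taskreader.ProgressAsync before each expensive step so the task can stop cooperatively. Below is a minimal sketch of the same write-with-cancellation-check pattern, using System.Threading.Channels from the BCL instead of the CoCoL channels used here; the IndexWorkItem type and the shouldContinueAsync delegate are hypothetical stand-ins.

using System;
using System.Collections.Generic;
using System.Threading.Channels;
using System.Threading.Tasks;

public static class IndexUploadProducer
{
    // Hypothetical stand-in for IndexVolumeUploadRequest
    public sealed record IndexWorkItem(string BlockFile);

    public static async Task ProduceAsync(
        IEnumerable<string> missingBlockFiles,
        ChannelWriter<IndexWorkItem> uploadChannel,
        Func<Task<bool>> shouldContinueAsync)   // plays the role of taskreader.ProgressAsync
    {
        foreach (var blockfile in missingBlockFiles)
        {
            // Stop cooperatively before starting expensive work
            if (!await shouldContinueAsync())
                return;

            // ... re-create the index volume for blockfile and record it as Uploading ...

            // Hand the finished volume to whichever consumer drains the channel
            await uploadChannel.WriteAsync(new IndexWorkItem(blockfile));
        }
    }
}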
Example #2
        public BackupHandler(string backendurl, Options options, BackupResults results)
        {
            EMPTY_METADATA = Utility.WrapMetadata(new Dictionary<string, string>(), options);

            m_options = options;
            m_result = results;
            m_backendurl = backendurl;

            m_attributeFilter = m_options.FileAttributeFilter;
            m_symlinkPolicy = m_options.SymlinkPolicy;
            m_blocksize = m_options.Blocksize;

            m_blockbuffer = new byte[m_options.Blocksize * Math.Max(1, m_options.FileReadBufferSize / m_options.Blocksize)];
            m_blocklistbuffer = new byte[m_options.Blocksize];

            m_blockhasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm);
            m_filehasher = System.Security.Cryptography.HashAlgorithm.Create(m_options.FileHashAlgorithm);

            if (m_blockhasher == null)
                throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.BlockHashAlgorithm));
            if (m_filehasher == null)
                throw new Exception(string.Format(Strings.Foresthash.InvalidHashAlgorithm, m_options.FileHashAlgorithm));

            if (!m_blockhasher.CanReuseTransform)
                throw new Exception(string.Format(Strings.Foresthash.InvalidCryptoSystem, m_options.BlockHashAlgorithm));
            if (!m_filehasher.CanReuseTransform)
                throw new Exception(string.Format(Strings.Foresthash.InvalidCryptoSystem, m_options.FileHashAlgorithm));

            if (options.AllowPassphraseChange)
                throw new Exception(Strings.Foresthash.PassphraseChangeUnsupported);
        }
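
Beyond wiring up options, the constructor above validates both hash algorithms: they must resolve by name and must be reusable, because the handler hashes many blocks and files with the same instance. A minimal sketch of that validation, assuming nothing beyond System.Security.Cryptography; the algorithm name in the usage line is only an illustrative default.

using System;
using System.Security.Cryptography;

public static class HasherFactory
{
    public static HashAlgorithm CreateReusable(string algorithmName)
    {
        // HashAlgorithm.Create returns null when the name is unknown
        var hasher = HashAlgorithm.Create(algorithmName);
        if (hasher == null)
            throw new ArgumentException($"Unsupported hash algorithm: {algorithmName}");

        // The same instance is reused for every block, so the transform must be reusable
        if (!hasher.CanReuseTransform)
            throw new ArgumentException($"Hash algorithm cannot be reused: {algorithmName}");

        return hasher;
    }
}

// Usage (illustrative): using var blockHasher = HasherFactory.CreateReusable("SHA256");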
Example #3
        internal void UpdateChangeStatistics(BackupResults results)
        {
            using (var cmd = m_connection.CreateCommand())
            {
                var lastFilesetId = GetPreviousFilesetID(cmd);
                results.AddedFolders  = Convert.ToInt64(cmd.ExecuteScalar(@"SELECT COUNT(*) FROM ""File"" INNER JOIN ""FilesetEntry"" ON ""File"".""ID"" = ""FilesetEntry"".""FileID"" WHERE ""FilesetEntry"".""FilesetID"" = ? AND ""File"".""BlocksetID"" = ? AND NOT ""File"".""Path"" IN (SELECT ""Path"" FROM ""File"" INNER JOIN ""FilesetEntry"" ON ""File"".""ID"" = ""FilesetEntry"".""FileID"" WHERE ""FilesetEntry"".""FilesetID"" = ?)", m_filesetId, FOLDER_BLOCKSET_ID, lastFilesetId));
                results.AddedSymlinks = Convert.ToInt64(cmd.ExecuteScalar(@"SELECT COUNT(*) FROM ""File"" INNER JOIN ""FilesetEntry"" ON ""File"".""ID"" = ""FilesetEntry"".""FileID"" WHERE ""FilesetEntry"".""FilesetID"" = ? AND ""File"".""BlocksetID"" = ? AND NOT ""File"".""Path"" IN (SELECT ""Path"" FROM ""File"" INNER JOIN ""FilesetEntry"" ON ""File"".""ID"" = ""FilesetEntry"".""FileID"" WHERE ""FilesetEntry"".""FilesetID"" = ?)", m_filesetId, SYMLINK_BLOCKSET_ID, lastFilesetId));

                results.DeletedFolders  = Convert.ToInt64(cmd.ExecuteScalar(@"SELECT COUNT(*) FROM ""File"" INNER JOIN ""FilesetEntry"" ON ""File"".""ID"" = ""FilesetEntry"".""FileID"" WHERE ""FilesetEntry"".""FilesetID"" = ? AND ""File"".""BlocksetID"" = ? AND NOT ""File"".""Path"" IN (SELECT ""Path"" FROM ""File"" INNER JOIN ""FilesetEntry"" ON ""File"".""ID"" = ""FilesetEntry"".""FileID"" WHERE ""FilesetEntry"".""FilesetID"" = ?)", lastFilesetId, FOLDER_BLOCKSET_ID, m_filesetId));
                results.DeletedSymlinks = Convert.ToInt64(cmd.ExecuteScalar(@"SELECT COUNT(*) FROM ""File"" INNER JOIN ""FilesetEntry"" ON ""File"".""ID"" = ""FilesetEntry"".""FileID"" WHERE ""FilesetEntry"".""FilesetID"" = ? AND ""File"".""BlocksetID"" = ? AND NOT ""File"".""Path"" IN (SELECT ""Path"" FROM ""File"" INNER JOIN ""FilesetEntry"" ON ""File"".""ID"" = ""FilesetEntry"".""FileID"" WHERE ""FilesetEntry"".""FilesetID"" = ?)", lastFilesetId, SYMLINK_BLOCKSET_ID, m_filesetId));

                var subqueryNonFiles = @"SELECT ""File"".""Path"", ""Blockset"".""Fullhash"" FROM ""File"", ""FilesetEntry"", ""Metadataset"", ""Blockset"" WHERE ""File"".""ID"" = ""FilesetEntry"".""FileID"" AND ""Metadataset"".""ID"" = ""File"".""MetadataID"" AND ""File"".""BlocksetID"" = ? AND ""Metadataset"".""BlocksetID"" = ""Blockset"".""ID"" AND ""FilesetEntry"".""FilesetID"" = ? ";
                // Parameters follow the placeholder order in subqueryNonFiles: blockset id first, then fileset id
                results.ModifiedFolders  = Convert.ToInt64(cmd.ExecuteScalar(@"SELECT COUNT(*) FROM (" + subqueryNonFiles + @") A, (" + subqueryNonFiles + @") B WHERE ""A"".""Path"" = ""B"".""Path"" AND ""A"".""Fullhash"" != ""B"".""Fullhash"" ", FOLDER_BLOCKSET_ID, lastFilesetId, FOLDER_BLOCKSET_ID, m_filesetId));
                results.ModifiedSymlinks = Convert.ToInt64(cmd.ExecuteScalar(@"SELECT COUNT(*) FROM (" + subqueryNonFiles + @") A, (" + subqueryNonFiles + @") B WHERE ""A"".""Path"" = ""B"".""Path"" AND ""A"".""Fullhash"" != ""B"".""Fullhash"" ", SYMLINK_BLOCKSET_ID, lastFilesetId, SYMLINK_BLOCKSET_ID, m_filesetId));

                var tmpName = "TmpFileList-" + Library.Utility.Utility.ByteArrayAsHexString(Guid.NewGuid().ToByteArray());
                try
                {
                    var subqueryFiles = @"SELECT ""File"".""Path"", ""A"".""Fullhash"" AS ""Filehash"", ""B"".""Fullhash"" AS ""Metahash"" FROM ""File"", ""FilesetEntry"", ""Blockset"" A, ""Blockset"" B, ""Metadataset""  WHERE ""File"".""ID"" = ""FilesetEntry"".""FileID"" AND ""A"".""ID"" = ""File"".""BlocksetID"" AND ""FilesetEntry"".""FilesetID"" = ? AND ""File"".""MetadataID"" = ""Metadataset"".""ID"" AND ""Metadataset"".""BlocksetID"" = ""B"".""ID"" ";

                    cmd.ExecuteNonQuery(string.Format(@"CREATE TEMPORARY TABLE ""{0}"" AS " + subqueryFiles, tmpName), lastFilesetId);

                    results.AddedFiles    = Convert.ToInt64(cmd.ExecuteScalar(string.Format(@"SELECT COUNT(*) FROM ""File"" INNER JOIN ""FilesetEntry"" ON ""File"".""ID"" = ""FilesetEntry"".""FileID"" WHERE ""FilesetEntry"".""FilesetID"" = ? AND ""File"".""BlocksetID"" != ? AND ""File"".""BlocksetID"" != ? AND NOT ""File"".""Path"" IN (SELECT ""Path"" FROM ""{0}"")", tmpName), m_filesetId, FOLDER_BLOCKSET_ID, SYMLINK_BLOCKSET_ID));
                    results.DeletedFiles  = Convert.ToInt64(cmd.ExecuteScalar(string.Format(@"SELECT COUNT(*) FROM ""{0}"" WHERE ""{0}"".""Path"" NOT IN (SELECT ""Path"" FROM ""File"" INNER JOIN ""FilesetEntry"" ON ""File"".""ID"" = ""FilesetEntry"".""FileID"" WHERE ""FilesetEntry"".""FilesetID"" = ?)", tmpName), m_filesetId));
                    results.ModifiedFiles = Convert.ToInt64(cmd.ExecuteScalar(string.Format(@"SELECT COUNT(*) FROM ""{0}"" A, (" + subqueryFiles + @") B WHERE ""A"".""Path"" = ""B"".""Path"" AND (""A"".""Filehash"" != ""B"".""Filehash"" OR ""A"".""Metahash"" != ""B"".""Metahash"")", tmpName), m_filesetId));
                }
                finally
                {
                    try { cmd.ExecuteNonQuery(string.Format(@"DROP TABLE IF EXISTS ""{0}"";", tmpName)); }
                    catch (Exception ex) { m_result.AddWarning("Dispose temp table error", ex); }
                }
            }
        }
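
All of the statistics above are computed in SQL against the current and previous fileset: an entry counts as added when its path exists only in the current fileset, deleted when it exists only in the previous one, and modified when the same path appears in both with a different hash. A minimal in-memory sketch of that set logic in LINQ; the Entry record is hypothetical, and the real code keeps everything inside SQLite rather than materializing these lists.

using System.Collections.Generic;
using System.Linq;

public sealed record Entry(string Path, string Hash);

public static class ChangeStatisticsSketch
{
    public static (long Added, long Deleted, long Modified) Compute(
        IReadOnlyCollection<Entry> previous, IReadOnlyCollection<Entry> current)
    {
        var prevByPath = previous.ToDictionary(e => e.Path, e => e.Hash);
        var currByPath = current.ToDictionary(e => e.Path, e => e.Hash);

        // Added: in the current set, not in the previous one
        long added    = currByPath.Keys.Count(p => !prevByPath.ContainsKey(p));
        // Deleted: in the previous set, not in the current one
        long deleted  = prevByPath.Keys.Count(p => !currByPath.ContainsKey(p));
        // Modified: same path in both, but the hash changed
        long modified = currByPath.Count(kv =>
            prevByPath.TryGetValue(kv.Key, out var oldHash) && oldHash != kv.Value);

        return (added, deleted, modified);
    }
}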
Example #4
        public BackupHandler(string backendurl, Options options, BackupResults results)
        {
            m_options    = options;
            m_result     = results;
            m_backendurl = backendurl;

            if (options.AllowPassphraseChange)
            {
                throw new UserInformationException(Strings.Common.PassphraseChangeUnsupported, "PassphraseChangeUnsupported");
            }
        }
Example #5
        private static async Task<long> FlushBackend(BackupResults result, IWriteChannel<Backup.IUploadRequest> uploadtarget, Task uploader)
        {
            var flushReq = new Backup.FlushRequest();

            // Wait for upload completion
            result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload);
            await uploadtarget.WriteAsync(flushReq).ConfigureAwait(false);

            await uploader.ConfigureAwait(false);

            // Grab the size of the last uploaded volume
            return(await flushReq.LastWriteSizeAsync);
        }
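
FlushRequest works as a sentinel written into the same channel as the upload requests; once the uploader has drained everything queued before it, it completes the request, and LastWriteSizeAsync yields the size of the last uploaded volume (see SetFlushed in the uploader example further down). A minimal sketch of such a sentinel, assuming it is backed by a TaskCompletionSource<long>; the real Duplicati type may differ in detail.

using System.Threading.Tasks;

// Hypothetical flush sentinel: the producer awaits LastWriteSizeAsync,
// the consumer calls SetFlushed once all earlier uploads have finished.
public sealed class FlushSentinel
{
    private readonly TaskCompletionSource<long> m_tcs =
        new TaskCompletionSource<long>(TaskCreationOptions.RunContinuationsAsynchronously);

    public Task<long> LastWriteSizeAsync => m_tcs.Task;

    public void SetFlushed(long lastWriteSize) => m_tcs.TrySetResult(lastWriteSize);
}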
Example #6
        public BackupHandler(string backendurl, Options options, BackupResults results)
        {
            EMPTY_METADATA = Utility.WrapMetadata(new Dictionary<string, string>(), options);

            m_options = options;
            m_result = results;
            m_backendurl = backendurl;

            m_attributeFilter = m_options.FileAttributeFilter;
            m_symlinkPolicy = m_options.SymlinkPolicy;

            if (options.AllowPassphraseChange)
                throw new Exception(Strings.Foresthash.PassphraseChangeUnsupported);
        }
Example #7
        public static Task Run(Snapshots.ISnapshotService snapshot, BackupResults result, Options options, IFilter sourcefilter, IFilter filter, Common.ITaskReader taskreader, System.Threading.CancellationToken token)
        {
            // Make sure we create the enumeration process in a separate scope,
            // but keep the log channel from the parent scope
            using (Logging.Log.StartIsolatingScope())
                using (new IsolatedChannelScope())
                {
                    var enumeratorTask = Backup.FileEnumerationProcess.Run(snapshot, options.FileAttributeFilter, sourcefilter, filter, options.SymlinkPolicy, options.HardlinkPolicy, options.ExcludeEmptyFolders, options.IgnoreFilenames, options.ChangedFilelist, taskreader);
                    var counterTask    = AutomationExtensions.RunTask(new
                    {
                        Input = Backup.Channels.SourcePaths.ForRead
                    },

                                                                      async self =>
                    {
                        var count = 0L;
                        var size  = 0L;

                        try
                        {
                            while (await taskreader.ProgressAsync && !token.IsCancellationRequested)
                            {
                                var path = await self.Input.ReadAsync();

                                count++;

                                try
                                {
                                    size += snapshot.GetFileSize(path);
                                }
                                catch
                                {
                                }

                                result.OperationProgressUpdater.UpdatefileCount(count, size, false);
                            }
                        }
                        finally
                        {
                            result.OperationProgressUpdater.UpdatefileCount(count, size, true);
                        }
                    });

                    return(Task.WhenAll(enumeratorTask, counterTask));
                }
        }
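
The counter task above consumes every discovered path, keeps a best-effort running total of file sizes (lookups that throw are simply ignored), and always emits a final progress update in the finally block. A minimal sketch of that consumer loop using System.Threading.Channels; the getFileSize and reportProgress delegates are hypothetical stand-ins for snapshot.GetFileSize and result.OperationProgressUpdater.UpdatefileCount.

using System;
using System.Threading;
using System.Threading.Channels;
using System.Threading.Tasks;

public static class PathCounter
{
    public static async Task RunAsync(
        ChannelReader<string> paths,
        Func<string, long> getFileSize,            // e.g. snapshot.GetFileSize
        Action<long, long, bool> reportProgress,   // (count, size, done)
        CancellationToken token)
    {
        long count = 0, size = 0;
        try
        {
            while (await paths.WaitToReadAsync(token))
            {
                while (paths.TryRead(out var path))
                {
                    count++;
                    try { size += getFileSize(path); }
                    catch { /* size is best-effort; unreadable entries are ignored */ }
                    reportProgress(count, size, false);
                }
            }
        }
        finally
        {
            // Always emit a final update so the totals are reported as complete
            reportProgress(count, size, true);
        }
    }
}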
Example #8
        /// <summary>
        /// Performs the bulk of work by starting all relevant processes
        /// </summary>
        private static async Task RunMainOperation(IEnumerable<string> sources, Snapshots.ISnapshotService snapshot, UsnJournalService journalService, Backup.BackupDatabase database, Backup.BackupStatsCollector stats, Options options, IFilter sourcefilter, IFilter filter, BackupResults result, Common.ITaskReader taskreader, long lastfilesetid)
        {
            using (new Logging.Timer(LOGTAG, "BackupMainOperation", "BackupMainOperation"))
            {
                // Make sure the CompressionHints table is initialized, otherwise all workers will initialize it
                var tb = options.CompressionHints.Count;

                Task all;
                using (new ChannelScope())
                {
                    all = Task.WhenAll(
                        new []
                    {
                        Backup.DataBlockProcessor.Run(database, options, taskreader),
                        Backup.FileBlockProcessor.Run(snapshot, options, database, stats, taskreader),
                        Backup.StreamBlockSplitter.Run(options, database, taskreader),
                        Backup.FileEnumerationProcess.Run(sources, snapshot, journalService, options.FileAttributeFilter, sourcefilter, filter, options.SymlinkPolicy, options.HardlinkPolicy, options.ExcludeEmptyFolders, options.IgnoreFilenames, options.ChangedFilelist, taskreader),
                        Backup.FilePreFilterProcess.Run(snapshot, options, stats, database),
                        Backup.MetadataPreProcess.Run(snapshot, options, database, lastfilesetid),
                        Backup.SpillCollectorProcess.Run(options, database, taskreader),
                        Backup.ProgressHandler.Run(result)
                    }
                        // Spawn additional block hashers
                        .Union(
                            Enumerable.Range(0, options.ConcurrencyBlockHashers - 1).Select(x => Backup.StreamBlockSplitter.Run(options, database, taskreader))
                            )
                        // Spawn additional compressors
                        .Union(
                            Enumerable.Range(0, options.ConcurrencyCompressors - 1).Select(x => Backup.DataBlockProcessor.Run(database, options, taskreader))
                            )
                        );
                }

                await all.ConfigureAwait(false);

                if (options.ChangedFilelist != null && options.ChangedFilelist.Length >= 1)
                {
                    await database.AppendFilesFromPreviousSetAsync(options.DeletedFilelist);
                }
                else if (journalService != null)
                {
                    // append files from previous fileset, unless part of modifiedSources, which we've just scanned
                    await database.AppendFilesFromPreviousSetWithPredicateAsync((path, fileSize) =>
                    {
                        if (journalService.IsPathEnumerated(path))
                        {
                            return(true);
                        }

                        if (fileSize >= 0)
                        {
                            stats.AddExaminedFile(fileSize);
                        }
                        return(false);
                    });

                    // store journal data in database
                    var data = journalService.VolumeDataList.Where(p => p.JournalData != null).Select(p => p.JournalData).ToList();
                    if (data.Any())
                    {
                        // always record change journal data for current fileset (entry may be dropped later if nothing is uploaded)
                        await database.CreateChangeJournalDataAsync(data);

                        // update the previous fileset's change journal entry to resume at this point in case nothing was backed up
                        await database.UpdateChangeJournalDataAsync(data, lastfilesetid);
                    }
                }

                result.OperationProgressUpdater.UpdatefileCount(result.ExaminedFiles, result.SizeOfExaminedFiles, true);
            }
        }
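
Structurally, the method above starts one task per pipeline stage, appends extra block hashers and compressors with Enumerable.Range(...).Select(...), and awaits the whole set with Task.WhenAll. A minimal sketch of that fan-out shape; the delegates are hypothetical stand-ins for stages such as Backup.StreamBlockSplitter.Run.

using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;

public static class WorkerFanOut
{
    public static Task RunAsync(
        IEnumerable<Func<Task>> coreStages,   // one task per pipeline stage
        Func<Task> extraWorker,               // e.g. an additional block hasher
        int extraWorkerCount)
    {
        var all = coreStages.Select(start => start())
            // Spawn additional identical workers, mirroring ConcurrencyBlockHashers - 1 above
            .Concat(Enumerable.Range(0, Math.Max(0, extraWorkerCount)).Select(_ => extraWorker()));

        return Task.WhenAll(all);
    }
}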
Example #9
 public Task UpdateChangeStatisticsAsync(BackupResults result)
 {
     return(RunOnMain(() => m_database.UpdateChangeStatistics(result, m_transaction)));
 }
Example #10
        public static Task Run(Common.BackendHandler backend, Options options, Common.DatabaseCommon database, BackupResults results, Common.ITaskReader taskreader, StatsCollector stats)
        {
            return(AutomationExtensions.RunTask(new
            {
                Input = Channels.BackendRequest.ForRead,
            },

                                                async self =>
            {
                var inProgress = new Queue<KeyValuePair<int, Task>>();
                var max_pending = options.AsynchronousUploadLimit == 0 ? long.MaxValue : options.AsynchronousUploadLimit;
                var noIndexFiles = options.IndexfilePolicy == Options.IndexFileStrategy.None;
                var active = 0;
                var lastSize = -1L;

                while (!await self.Input.IsRetiredAsync && await taskreader.ProgressAsync)
                {
                    try
                    {
                        var req = await self.Input.ReadAsync();

                        if (!await taskreader.ProgressAsync)
                        {
                            continue;
                        }

                        var task = default(KeyValuePair<int, Task>);
                        if (req is VolumeUploadRequest)
                        {
                            lastSize = ((VolumeUploadRequest)req).BlockVolume.SourceSize;

                            if (noIndexFiles || ((VolumeUploadRequest)req).IndexVolume == null)
                            {
                                task = new KeyValuePair<int, Task>(1, backend.UploadFileAsync(((VolumeUploadRequest)req).BlockVolume, null));
                            }
                            else
                            {
                                task = new KeyValuePair<int, Task>(2, backend.UploadFileAsync(((VolumeUploadRequest)req).BlockVolume, name => ((VolumeUploadRequest)req).IndexVolume.CreateVolume(name, options, database)));
                            }
                        }
                        else if (req is FilesetUploadRequest)
                        {
                            task = new KeyValuePair<int, Task>(1, backend.UploadFileAsync(((FilesetUploadRequest)req).Fileset));
                        }
                        else if (req is IndexVolumeUploadRequest)
                        {
                            task = new KeyValuePair<int, Task>(1, backend.UploadFileAsync(((IndexVolumeUploadRequest)req).IndexVolume));
                        }
                        else if (req is FlushRequest)
                        {
                            try
                            {
                                while (inProgress.Count > 0)
                                {
                                    await inProgress.Dequeue().Value;
                                }
                                active = 0;
                            }
                            finally
                            {
                                ((FlushRequest)req).SetFlushed(lastSize);
                            }
                        }

                        if (task.Value != null)
                        {
                            inProgress.Enqueue(task);
                            active += task.Key;
                        }
                    }
                    catch (Exception ex)
                    {
                        if (!ex.IsRetiredException())
                        {
                            throw;
                        }
                    }

                    while (active >= max_pending)
                    {
                        var top = inProgress.Dequeue();

                        // See if we are done
                        if (await Task.WhenAny(top.Value, Task.Delay(500)) != top.Value)
                        {
                            try
                            {
                                stats.SetBlocking(true);
                                await top.Value;
                            }
                            finally
                            {
                                stats.SetBlocking(false);
                            }
                        }

                        active -= top.Key;
                    }
                }

                results.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload);

                try
                {
                    stats.SetBlocking(true);
                    while (inProgress.Count > 0)
                    {
                        await inProgress.Dequeue().Value;
                    }
                }
                finally
                {
                    stats.SetBlocking(false);
                }
            }));
        }
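
The uploader above hand-rolls back-pressure: it queues in-flight uploads, weights them (a block volume plus its index file counts as two), and blocks, flagging the wait via stats.SetBlocking, whenever the weighted count reaches AsynchronousUploadLimit. A minimal sketch of the same bounded-concurrency idea using SemaphoreSlim instead of an explicit queue; the uploadAsync delegate stands in for backend.UploadFileAsync.

using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;

public static class BoundedUploader
{
    public static async Task UploadAllAsync(
        IEnumerable<string> volumes,
        Func<string, Task> uploadAsync,   // stands in for backend.UploadFileAsync
        int maxParallelUploads)
    {
        using var gate = new SemaphoreSlim(Math.Max(1, maxParallelUploads));
        var pending = new List<Task>();

        foreach (var volume in volumes)
        {
            await gate.WaitAsync();       // back-pressure: wait when the limit is reached
            pending.Add(Task.Run(async () =>
            {
                try { await uploadAsync(volume); }
                finally { gate.Release(); }
            }));
        }

        // Drain everything still in flight, like the final flush loop above
        await Task.WhenAll(pending);
    }
}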
Example #11
        public static Task Run(BackupDatabase database, Options options, BackupResults result, ITaskReader taskreader, string lasttempfilelist, long lasttempfileid)
        {
            return(AutomationExtensions.RunTask(new
            {
                UploadChannel = Channels.BackendRequest.ForWrite
            },

                                                async self =>
            {
                // Check if we should upload a synthetic filelist
                if (options.DisableSyntheticFilelist || string.IsNullOrWhiteSpace(lasttempfilelist) || lasttempfileid < 0)
                {
                    return;
                }

                // Check that we still need to process this after the cleanup has performed its duties
                var syntbase = await database.GetRemoteVolumeFromIDAsync(lasttempfileid);

                // If we do not have a valid entry, or the cleanup already uploaded it, warn and quit
                if (syntbase.Name == null || syntbase.State == RemoteVolumeState.Uploaded)
                {
                    // TODO: If the repair succeeds, this could give a false warning?
                    Logging.Log.WriteWarningMessage(LOGTAG, "MissingTemporaryFilelist", null, "Expected there to be a temporary fileset for synthetic filelist ({0}, {1}), but none was found?", lasttempfileid, lasttempfilelist);
                    return;
                }

                // The fileset is missing or has already been repaired
                if (syntbase.Name == null || (syntbase.State != RemoteVolumeState.Uploading && syntbase.State != RemoteVolumeState.Temporary))
                {
                    Logging.Log.WriteInformationMessage(LOGTAG, "SkippingSyntheticListUpload", "Skipping synthetic upload because temporary fileset appears to be complete: ({0}, {1}, {2})", lasttempfileid, lasttempfilelist, syntbase.State);
                    return;
                }

                // Ready to build and upload the synthetic list
                await database.CommitTransactionAsync("PreSyntheticFilelist");
                var incompleteFilesets = (await database.GetIncompleteFilesetsAsync()).OrderBy(x => x.Value).ToList();

                result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreviousBackupFinalize);
                Logging.Log.WriteInformationMessage(LOGTAG, "PreviousBackupFilelistUpload", "Uploading filelist from previous interrupted backup");

                if (!await taskreader.ProgressAsync)
                {
                    return;
                }

                var incompleteSet = incompleteFilesets.Last();
                var badIds = from n in incompleteFilesets select n.Key;

                var prevs = (from n in await database.GetFilesetTimesAsync()
                             where
                             n.Key < incompleteSet.Key
                             &&
                             !badIds.Contains(n.Key)
                             orderby n.Key
                             select n.Key).ToArray();

                var prevId = prevs.Length == 0 ? -1 : prevs.Last();

                FilesetVolumeWriter fsw = null;
                try
                {
                    var s = 1;
                    var fileTime = incompleteSet.Value + TimeSpan.FromSeconds(s);
                    var oldFilesetID = incompleteSet.Key;

                    // Probe for an unused filename
                    while (s < 60)
                    {
                        var id = await database.GetRemoteVolumeIDAsync(VolumeBase.GenerateFilename(RemoteVolumeType.Files, options, null, fileTime));
                        if (id < 0)
                        {
                            break;
                        }

                        fileTime = incompleteSet.Value + TimeSpan.FromSeconds(++s);
                    }

                    fsw = new FilesetVolumeWriter(options, fileTime);
                    fsw.VolumeID = await database.RegisterRemoteVolumeAsync(fsw.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary);

                    if (!string.IsNullOrEmpty(options.ControlFiles))
                    {
                        foreach (var p in options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries))
                        {
                            fsw.AddControlFile(p, options.GetCompressionHintFromFilename(p));
                        }
                    }

                    var newFilesetID = await database.CreateFilesetAsync(fsw.VolumeID, fileTime);
                    await database.LinkFilesetToVolumeAsync(newFilesetID, fsw.VolumeID);
                    await database.AppendFilesFromPreviousSetAsync(null, newFilesetID, prevId, fileTime);

                    await database.WriteFilesetAsync(fsw, newFilesetID);

                    if (!await taskreader.ProgressAsync)
                    {
                        return;
                    }

                    await database.UpdateRemoteVolumeAsync(fsw.RemoteFilename, RemoteVolumeState.Uploading, -1, null);
                    await database.CommitTransactionAsync("CommitUpdateFilelistVolume");
                    await self.UploadChannel.WriteAsync(new FilesetUploadRequest(fsw));
                    fsw = null;
                }
                catch
                {
                    await database.RollbackTransactionAsync();
                    throw;
                }
                finally
                {
                    if (fsw != null)
                    {
                        try { fsw.Dispose(); }
                        catch { fsw = null; }
                    }
                }
            }));
        }
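
A small detail worth calling out above is the filename probe: the synthetic fileset reuses the timestamp of the interrupted one, bumped a second at a time until the generated remote filename is unused, giving up after 60 attempts. A minimal sketch of that probe; nameExistsAsync is a hypothetical stand-in for VolumeBase.GenerateFilename plus database.GetRemoteVolumeIDAsync.

using System;
using System.Threading.Tasks;

public static class FilesetTimeProbe
{
    public static async Task<DateTime> FindUnusedTimestampAsync(
        DateTime baseTime,
        Func<DateTime, Task<bool>> nameExistsAsync,
        int maxAttempts = 60)
    {
        var s = 1;
        var fileTime = baseTime + TimeSpan.FromSeconds(s);

        // Keep bumping the timestamp by one second until the generated
        // remote filename is unused, or we run out of attempts
        while (s < maxAttempts && await nameExistsAsync(fileTime))
            fileTime = baseTime + TimeSpan.FromSeconds(++s);

        return fileTime;
    }
}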
Example #12
        public static Task Run(BackupResults result, BackupDatabase db, Options options, FilesetVolumeWriter filesetvolume, long filesetid, Common.ITaskReader taskreader)
        {
            return(AutomationExtensions.RunTask(new
            {
                Output = Channels.BackendRequest.ForWrite,
            },

                                                async self =>
            {
                if (!await taskreader.ProgressAsync)
                {
                    return;
                }

                // Update the reported source and backend changes
                using (new Logging.Timer(LOGTAG, "UpdateChangeStatistics", "UpdateChangeStatistics"))
                    await db.UpdateChangeStatisticsAsync(result);

                var changeCount =
                    result.AddedFiles + result.ModifiedFiles + result.DeletedFiles +
                    result.AddedFolders + result.ModifiedFolders + result.DeletedFolders +
                    result.AddedSymlinks + result.ModifiedSymlinks + result.DeletedSymlinks;

                // Changes in the filelist trigger a filelist upload
                if (options.UploadUnchangedBackups || changeCount > 0)
                {
                    using (new Logging.Timer(LOGTAG, "UploadNewFileset", "Uploading a new fileset"))
                    {
                        if (!string.IsNullOrEmpty(options.ControlFiles))
                        {
                            foreach (var p in options.ControlFiles.Split(new char[] { System.IO.Path.PathSeparator }, StringSplitOptions.RemoveEmptyEntries))
                            {
                                filesetvolume.AddControlFile(p, options.GetCompressionHintFromFilename(p));
                            }
                        }

                        if (!await taskreader.ProgressAsync)
                        {
                            return;
                        }

                        await db.WriteFilesetAsync(filesetvolume, filesetid);
                        filesetvolume.Close();

                        if (!await taskreader.ProgressAsync)
                        {
                            return;
                        }

                        await db.UpdateRemoteVolumeAsync(filesetvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null);
                        await db.CommitTransactionAsync("CommitUpdateRemoteVolume");
                        await self.Output.WriteAsync(new FilesetUploadRequest(filesetvolume));
                    }
                }
                else
                {
                    Logging.Log.WriteVerboseMessage(LOGTAG, "RemovingLeftoverTempFile", "removing temp files, as no data needs to be uploaded");
                    await db.RemoveRemoteVolumeAsync(filesetvolume.RemoteFilename);
                }

                await db.CommitTransactionAsync("CommitUpdateRemoteVolume");
            }));
        }
Example #13
        /// <summary>
        /// Performs the bulk of work by starting all relevant processes
        /// </summary>
        private static async Task RunMainOperation(Snapshots.ISnapshotService snapshot, Backup.BackupDatabase database, Backup.BackupStatsCollector stats, Options options, IFilter sourcefilter, IFilter filter, BackupResults result, Common.ITaskReader taskreader, long lastfilesetid)
        {
            using (new Logging.Timer(LOGTAG, "BackupMainOperation", "BackupMainOperation"))
            {
                // Make sure the CompressionHints table is initialized, otherwise all workers will initialize it
                var tb = options.CompressionHints.Count;

                Task all;
                using (new ChannelScope())
                {
                    all = Task.WhenAll(
                        new []
                    {
                        Backup.DataBlockProcessor.Run(database, options, taskreader),
                        Backup.FileBlockProcessor.Run(snapshot, options, database, stats, taskreader),
                        Backup.StreamBlockSplitter.Run(options, database, taskreader),
                        Backup.FileEnumerationProcess.Run(snapshot, options.FileAttributeFilter, sourcefilter, filter, options.SymlinkPolicy, options.HardlinkPolicy, options.ExcludeEmptyFolders, options.IgnoreFilenames, options.ChangedFilelist, taskreader),
                        Backup.FilePreFilterProcess.Run(snapshot, options, stats, database),
                        Backup.MetadataPreProcess.Run(snapshot, options, database, lastfilesetid),
                        Backup.SpillCollectorProcess.Run(options, database, taskreader),
                        Backup.ProgressHandler.Run(result)
                    }
                        // Spawn additional block hashers
                        .Union(
                            Enumerable.Range(0, options.ConcurrencyBlockHashers - 1).Select(x => Backup.StreamBlockSplitter.Run(options, database, taskreader))
                            )
                        // Spawn additional compressors
                        .Union(
                            Enumerable.Range(0, options.ConcurrencyCompressors - 1).Select(x => Backup.DataBlockProcessor.Run(database, options, taskreader))
                            )
                        );
                }

                await all;

                if (options.ChangedFilelist != null && options.ChangedFilelist.Length >= 1)
                {
                    await database.AppendFilesFromPreviousSetAsync(options.DeletedFilelist);
                }

                result.OperationProgressUpdater.UpdatefileCount(result.ExaminedFiles, result.SizeOfExaminedFiles, true);
            }
        }
Example #14
        public static Task Run(BackupResults stat)
        {
            return(AutomationExtensions.RunTask(new
            {
                Input = Channels.ProgressEvents.ForRead
            },

                                                async self =>
            {
                var filesStarted = new Dictionary<string, long>();
                var fileProgress = new Dictionary<string, long>();
                long processedFileCount = 0;
                long processedFileSize = 0;
                string current = null;

                while (true)
                {
                    var t = await self.Input.ReadAsync();
                    switch (t.Type)
                    {
                    case EventType.FileStarted:
                        filesStarted[t.Filepath] = t.Length;
                        fileProgress[t.Filepath] = 0;
                        break;

                    case EventType.FileProgressUpdate:
                        if (t.Filepath == current)
                        {
                            stat.OperationProgressUpdater.UpdateFileProgress(t.Length);
                        }
                        break;

                    case EventType.FileClosed:
                        if (fileProgress.ContainsKey(t.Filepath))
                        {
                            fileProgress[t.Filepath] = t.Length;
                        }

                        if (t.Filepath == current)
                        {
                            stat.OperationProgressUpdater.UpdateFileProgress(t.Length);
                            current = null;
                        }

                        processedFileCount += 1;
                        processedFileSize += t.Length;

                        stat.OperationProgressUpdater.UpdatefilesProcessed(processedFileCount, processedFileSize);
                        filesStarted.Remove(t.Filepath);
                        fileProgress.Remove(t.Filepath);
                        break;

                    case EventType.FileSkipped:

                        processedFileCount += 1;
                        processedFileSize += t.Length;

                        stat.OperationProgressUpdater.UpdatefilesProcessed(processedFileCount, processedFileSize);
                        break;
                    }

                    if (current == null)
                    {
                        current = filesStarted.OrderByDescending(x => x.Value).Select(x => x.Key).FirstOrDefault();
                        if (current != null)
                        {
                            stat.OperationProgressUpdater.StartFile(current, filesStarted[current]);
                            if (fileProgress.ContainsKey(current) && fileProgress[current] > 0)
                            {
                                stat.OperationProgressUpdater.UpdateFileProgress(fileProgress[current]);
                            }
                        }
                    }
                }
            }));
        }
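
Because blocks from several files can be in flight at once, the progress handler above tracks started files and their sizes in dictionaries and, whenever the current file closes, promotes the largest file still open to be the new current one. A minimal sketch of just that selection step; the dictionary mirrors filesStarted above.

using System.Collections.Generic;
using System.Linq;

public static class CurrentFileSelector
{
    // Pick the largest file that has started but not yet closed,
    // mirroring the OrderByDescending(x => x.Value) selection above
    public static string PickCurrent(IReadOnlyDictionary<string, long> filesStarted)
        => filesStarted.OrderByDescending(x => x.Value)
                       .Select(x => x.Key)
                       .FirstOrDefault();
}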
Example #15
 public BackupStatsCollector(BackupResults res)
     : base(res.BackendWriter)
 {
     m_res = res;
 }