// Entry point: configures log4net, reads the backup configuration, builds an
// upload set for every configured database and directory backup, then pushes
// everything to S3 (unless uploads are disabled in config).
static void Main(string[] args)
{
    XmlConfigurator.Configure();
    log.Info("Start");

    BackupConfig config = BackupConfig.GetConfig();
    S3Interface.Setup(config.AccessKey, config.SecretKey, config.Bucket);

    var uploadSets = new List<UploadSet>();

    // One upload set per configured database backup (marked as a DB set).
    foreach (DatabaseBackup db in config.Databases)
    {
        var dbBackup = new BackupDatabase
        {
            ServerName = db.Server,
            DatabaseName = db.Name,
            FullKeyPrefix = config.FullKeyPrefix(db.KeyPrefix),
            FullBackupFrequency = db.FullBackupFrequency
        };
        uploadSets.Add(new UploadSet(dbBackup.MakeBackupFile(), db.KeyPrefix, db.Lifetime, true));
    }

    // One upload set per configured directory backup.
    foreach (DirectoryBackup dir in config.Directories)
    {
        var archiver = new ArchiveFiles
        {
            LocalDirectory = dir.Path,
            FullKeyPrefix = config.FullKeyPrefix(dir.KeyPrefix),
            BackupLifetime = dir.Lifetime
        };
        uploadSets.Add(new UploadSet(archiver.GetFilesToUpload(), dir.KeyPrefix, dir.Lifetime, false));
    }

    var upload = new Upload
    {
        AWSBucket = config.Bucket,
        AWSAccessKey = config.AccessKey,
        AWSSecretKey = config.SecretKey,
        KeyPrefix = config.KeyPrefix
    };

    // NoUpload is a config kill-switch for dry runs.
    if (config.NoUpload)
    {
        log.Info("UPLOAD DISABLED");
    }
    else
    {
        upload.TransferFiles(uploadSets);
    }

    log.Info("End");
}
// Decides whether the next backup may be an incremental (differential) one.
// Inspects the newest (FullBackupFrequency - 1) backups under FullKeyPrefix:
// if one of them was a full backup we can take a differential; if none were,
// a new full backup is needed (returns false).
private bool NeedIncrementalBackup()
{
    var s3 = new S3Interface();
    List<S3Object> backups = s3.ObjectsFromKey(FullKeyPrefix);

    // At most (frequency - 1) differentials are allowed between full backups;
    // clamp to zero so a frequency of 0/1 forces a full backup every time.
    int maxDifferentials = Math.Max(FullBackupFrequency - 1, 0);

    // Sort oldest-first, then keep only the newest maxDifferentials entries.
    backups.Sort((x, y) => x.LastModified.CompareTo(y.LastModified));
    if (backups.Count > maxDifferentials)
    {
        backups = backups.GetRange(backups.Count - maxDifferentials, maxDifferentials);
    }

    List<NameValueCollection> metadata = s3.GetObjectMetadata(backups);

    // True when any backup in the recent window was tagged as a full backup.
    bool hasRecentFull = metadata.Exists(md => md.Get("x-amz-meta-backuptype") == "Full");
    return hasRecentFull;
}
// Builds the list of local files under LocalDirectory that need uploading:
// files not present on the remote prefix (and still inside the retention
// window), plus files whose local copy is newer than the remote copy.
// Errors are logged and swallowed; an empty list is returned in that case.
public List<UploadItem> GetFilesToUpload()
{
    var uploads = new List<UploadItem>();
    try
    {
        log.InfoFormat("Start GetFilesToUpload for {0}", LocalDirectory);
        var s3Objects = new S3Interface().ObjectsFromKey(FullKeyPrefix);
        var localPaths = Directory.GetFiles(LocalDirectory);

        // Make Dictionaries with the file name and mod date for the local and remote lists
        var remotes = s3Objects.ToDictionary(
            o => o.Key.Substring(FullKeyPrefix.Length),
            o => o.LastModified);
        var locals = localPaths.ToDictionary(
            p => Path.GetFileName(p),
            p => File.GetLastWriteTimeUtc(p));

        locals.ForEach(f => log.DebugFormat("Local , {0} : {1}", f.Key, f.Value));
        remotes.ForEach(f => log.DebugFormat("Remote, {0} : {1}", f.Key, f.Value));

        // Retention cutoff: files older than this are considered purged and
        // not uploaded. Local timestamps are UTC (GetLastWriteTimeUtc), so the
        // cutoff must be UTC too — DateTime.Now here previously mixed local
        // time with UTC and skewed the window by the timezone offset.
        var startDate = BackupLifetime == int.MaxValue
            ? DateTime.MinValue
            : DateTime.UtcNow.AddDays(-BackupLifetime);

        // Upload a file when it is local-only and not old enough to be purged,
        // or when it exists remotely but the local copy is newer.
        // NOTE(review): assumes S3Object.LastModified is UTC — confirm against
        // the AWS SDK version in use.
        var newNames = locals
            .Where(l => remotes.TryGetValue(l.Key, out var remoteModified)
                ? l.Value > remoteModified
                : l.Value > startDate)
            .Select(l => l.Key)
            .ToList();

        uploads = newNames.ConvertAll(fn => new UploadItem
        {
            FilePath = Path.Combine(LocalDirectory, fn),
            Metadata = new NameValueCollection()
        });
    }
    catch (Exception e)
    {
        log.Error("GetFilesToUpload error", e);
    }
    log.InfoFormat("End GetFilesToUpload found {0}", uploads.Count);
    return uploads;
}